From 9f43e2a99f32bc11e9be16192450b3598030fbce Mon Sep 17 00:00:00 2001 From: David Wood Date: Mon, 2 Mar 2026 11:03:13 +0000 Subject: [PATCH 01/20] stdarch-verify: re-add runtime test check This was accidentally removed in 713444d. --- .../crates/stdarch-verify/tests/arm.rs | 4706 ++++++++++++++++- 1 file changed, 4482 insertions(+), 224 deletions(-) diff --git a/library/stdarch/crates/stdarch-verify/tests/arm.rs b/library/stdarch/crates/stdarch-verify/tests/arm.rs index 3ef9ce2a38b6..c5744de3f644 100644 --- a/library/stdarch/crates/stdarch-verify/tests/arm.rs +++ b/library/stdarch/crates/stdarch-verify/tests/arm.rs @@ -183,230 +183,20 @@ fn verify_all_signatures() { let mut all_valid = true; for rust in FUNCTIONS { if !rust.has_test { - let skip = [ - "vaddq_s64", - "vaddq_u64", - "vrsqrte_f32", - "vtbl1_s8", - "vtbl1_u8", - "vtbl1_p8", - "vtbl2_s8", - "vtbl2_u8", - "vtbl2_p8", - "vtbl3_s8", - "vtbl3_u8", - "vtbl3_p8", - "vtbl4_s8", - "vtbl4_u8", - "vtbl4_p8", - "vtbx1_s8", - "vtbx1_u8", - "vtbx1_p8", - "vtbx2_s8", - "vtbx2_u8", - "vtbx2_p8", - "vtbx3_s8", - "vtbx3_u8", - "vtbx3_p8", - "vtbx4_s8", - "vtbx4_u8", - "vtbx4_p8", - "udf", - "_clz_u8", - "_clz_u16", - "_clz_u32", - "_rbit_u32", - "_rev_u16", - "_rev_u32", - "__breakpoint", - "vpminq_f32", - "vpminq_f64", - "vpmaxq_f32", - "vpmaxq_f64", - "vcombine_s8", - "vcombine_s16", - "vcombine_s32", - "vcombine_s64", - "vcombine_u8", - "vcombine_u16", - "vcombine_u32", - "vcombine_u64", - "vcombine_p64", - "vcombine_f32", - "vcombine_p8", - "vcombine_p16", - "vcombine_f64", - "vtbl1_s8", - "vtbl1_u8", - "vtbl1_p8", - "vtbl2_s8", - "vtbl2_u8", - "vtbl2_p8", - "vtbl3_s8", - "vtbl3_u8", - "vtbl3_p8", - "vtbl4_s8", - "vtbl4_u8", - "vtbl4_p8", - "vtbx1_s8", - "vtbx1_u8", - "vtbx1_p8", - "vtbx2_s8", - "vtbx2_u8", - "vtbx2_p8", - "vtbx3_s8", - "vtbx3_u8", - "vtbx3_p8", - "vtbx4_s8", - "vtbx4_u8", - "vtbx4_p8", - "vqtbl1_s8", - "vqtbl1q_s8", - "vqtbl1_u8", - "vqtbl1q_u8", - "vqtbl1_p8", - "vqtbl1q_p8", - "vqtbx1_s8", - "vqtbx1q_s8", - "vqtbx1_u8", - "vqtbx1q_u8", - "vqtbx1_p8", - "vqtbx1q_p8", - "vqtbl2_s8", - "vqtbl2q_s8", - "vqtbl2_u8", - "vqtbl2q_u8", - "vqtbl2_p8", - "vqtbl2q_p8", - "vqtbx2_s8", - "vqtbx2q_s8", - "vqtbx2_u8", - "vqtbx2q_u8", - "vqtbx2_p8", - "vqtbx2q_p8", - "vqtbl3_s8", - "vqtbl3q_s8", - "vqtbl3_u8", - "vqtbl3q_u8", - "vqtbl3_p8", - "vqtbl3q_p8", - "vqtbx3_s8", - "vqtbx3q_s8", - "vqtbx3_u8", - "vqtbx3q_u8", - "vqtbx3_p8", - "vqtbx3q_p8", - "vqtbl4_s8", - "vqtbl4q_s8", - "vqtbl4_u8", - "vqtbl4q_u8", - "vqtbl4_p8", - "vqtbl4q_p8", - "vqtbx4_s8", - "vqtbx4q_s8", - "vqtbx4_u8", - "vqtbx4q_u8", - "vqtbx4_p8", - "vqtbx4q_p8", - "brk", - "_rev_u64", - "_clz_u64", - "_rbit_u64", - "_cls_u32", - "_cls_u64", - "_prefetch", - "vsli_n_s8", - "vsliq_n_s8", - "vsli_n_s16", - "vsliq_n_s16", - "vsli_n_s32", - "vsliq_n_s32", - "vsli_n_s64", - "vsliq_n_s64", - "vsli_n_u8", - "vsliq_n_u8", - "vsli_n_u16", - "vsliq_n_u16", - "vsli_n_u32", - "vsliq_n_u32", - "vsli_n_u64", - "vsliq_n_u64", - "vsli_n_p8", - "vsliq_n_p8", - "vsli_n_p16", - "vsliq_n_p16", - "vsli_n_p64", - "vsliq_n_p64", - "vsri_n_s8", - "vsriq_n_s8", - "vsri_n_s16", - "vsriq_n_s16", - "vsri_n_s32", - "vsriq_n_s32", - "vsri_n_s64", - "vsriq_n_s64", - "vsri_n_u8", - "vsriq_n_u8", - "vsri_n_u16", - "vsriq_n_u16", - "vsri_n_u32", - "vsriq_n_u32", - "vsri_n_u64", - "vsriq_n_u64", - "vsri_n_p8", - "vsriq_n_p8", - "vsri_n_p16", - "vsriq_n_p16", - "vsri_n_p64", - "vsriq_n_p64", - "__smulbb", - "__smultb", - "__smulbt", - "__smultt", - "__smulwb", - "__smulwt", - "__qadd", - "__qsub", - "__qdbl", - "__smlabb", - "__smlabt", - "__smlatb", - "__smlatt", - "__smlawb", - "__smlawt", - "__qadd8", - "__qsub8", - "__qsub16", - "__qadd16", - "__qasx", - "__qsax", - "__sadd16", - "__sadd8", - "__smlad", - "__smlsd", - "__sasx", - "__sel", - "__shadd8", - "__shadd16", - "__shsub8", - "__usub8", - "__ssub8", - "__shsub16", - "__smuad", - "__smuadx", - "__smusd", - "__smusdx", - "__usad8", - "__usada8", - "__ldrex", - "__strex", - "__ldrexb", - "__strexb", - "__ldrexh", - "__strexh", - "__clrex", - "__dbg", - ]; + if !SKIP_RUNTIME_TESTS.contains(&rust.name) { + println!( + "missing run-time test named `test_{}` for `{}`", + { + let mut id = rust.name; + while id.starts_with('_') { + id = &id[1..]; + } + id + }, + rust.name + ); + all_valid = false; + } } // Skip some intrinsics that aren't NEON and are located in different @@ -743,3 +533,4471 @@ fn parse_ty_base(s: &str) -> &'static Type { _ => panic!("failed to parse json type {s:?}"), } } + +// FIXME(arm-maintainers): With the advent of the `intrinsic-test` tool, new tests of this kind +// are no longer being added and just adding to this list indefinitely isn't the best solution for +// dealing with that. +static SKIP_RUNTIME_TESTS: &'static [&'static str] = &[ + "vaddq_s64", + "vaddq_u64", + "vrsqrte_f32", + "vtbl1_s8", + "vtbl1_u8", + "vtbl1_p8", + "vtbl2_s8", + "vtbl2_u8", + "vtbl2_p8", + "vtbl3_s8", + "vtbl3_u8", + "vtbl3_p8", + "vtbl4_s8", + "vtbl4_u8", + "vtbl4_p8", + "vtbx1_s8", + "vtbx1_u8", + "vtbx1_p8", + "vtbx2_s8", + "vtbx2_u8", + "vtbx2_p8", + "vtbx3_s8", + "vtbx3_u8", + "vtbx3_p8", + "vtbx4_s8", + "vtbx4_u8", + "vtbx4_p8", + "udf", + "_clz_u8", + "_clz_u16", + "_clz_u32", + "_rbit_u32", + "_rev_u16", + "_rev_u32", + "__breakpoint", + "vpminq_f32", + "vpminq_f64", + "vpmaxq_f32", + "vpmaxq_f64", + "vcombine_s8", + "vcombine_s16", + "vcombine_s32", + "vcombine_s64", + "vcombine_u8", + "vcombine_u16", + "vcombine_u32", + "vcombine_u64", + "vcombine_p64", + "vcombine_f32", + "vcombine_p8", + "vcombine_p16", + "vcombine_f64", + "vtbl1_s8", + "vtbl1_u8", + "vtbl1_p8", + "vtbl2_s8", + "vtbl2_u8", + "vtbl2_p8", + "vtbl3_s8", + "vtbl3_u8", + "vtbl3_p8", + "vtbl4_s8", + "vtbl4_u8", + "vtbl4_p8", + "vtbx1_s8", + "vtbx1_u8", + "vtbx1_p8", + "vtbx2_s8", + "vtbx2_u8", + "vtbx2_p8", + "vtbx3_s8", + "vtbx3_u8", + "vtbx3_p8", + "vtbx4_s8", + "vtbx4_u8", + "vtbx4_p8", + "vqtbl1_s8", + "vqtbl1q_s8", + "vqtbl1_u8", + "vqtbl1q_u8", + "vqtbl1_p8", + "vqtbl1q_p8", + "vqtbx1_s8", + "vqtbx1q_s8", + "vqtbx1_u8", + "vqtbx1q_u8", + "vqtbx1_p8", + "vqtbx1q_p8", + "vqtbl2_s8", + "vqtbl2q_s8", + "vqtbl2_u8", + "vqtbl2q_u8", + "vqtbl2_p8", + "vqtbl2q_p8", + "vqtbx2_s8", + "vqtbx2q_s8", + "vqtbx2_u8", + "vqtbx2q_u8", + "vqtbx2_p8", + "vqtbx2q_p8", + "vqtbl3_s8", + "vqtbl3q_s8", + "vqtbl3_u8", + "vqtbl3q_u8", + "vqtbl3_p8", + "vqtbl3q_p8", + "vqtbx3_s8", + "vqtbx3q_s8", + "vqtbx3_u8", + "vqtbx3q_u8", + "vqtbx3_p8", + "vqtbx3q_p8", + "vqtbl4_s8", + "vqtbl4q_s8", + "vqtbl4_u8", + "vqtbl4q_u8", + "vqtbl4_p8", + "vqtbl4q_p8", + "vqtbx4_s8", + "vqtbx4q_s8", + "vqtbx4_u8", + "vqtbx4q_u8", + "vqtbx4_p8", + "vqtbx4q_p8", + "brk", + "_rev_u64", + "_clz_u64", + "_rbit_u64", + "_cls_u32", + "_cls_u64", + "_prefetch", + "vsli_n_s8", + "vsliq_n_s8", + "vsli_n_s16", + "vsliq_n_s16", + "vsli_n_s32", + "vsliq_n_s32", + "vsli_n_s64", + "vsliq_n_s64", + "vsli_n_u8", + "vsliq_n_u8", + "vsli_n_u16", + "vsliq_n_u16", + "vsli_n_u32", + "vsliq_n_u32", + "vsli_n_u64", + "vsliq_n_u64", + "vsli_n_p8", + "vsliq_n_p8", + "vsli_n_p16", + "vsliq_n_p16", + "vsli_n_p64", + "vsliq_n_p64", + "vsri_n_s8", + "vsriq_n_s8", + "vsri_n_s16", + "vsriq_n_s16", + "vsri_n_s32", + "vsriq_n_s32", + "vsri_n_s64", + "vsriq_n_s64", + "vsri_n_u8", + "vsriq_n_u8", + "vsri_n_u16", + "vsriq_n_u16", + "vsri_n_u32", + "vsriq_n_u32", + "vsri_n_u64", + "vsriq_n_u64", + "vsri_n_p8", + "vsriq_n_p8", + "vsri_n_p16", + "vsriq_n_p16", + "vsri_n_p64", + "vsriq_n_p64", + "__smulbb", + "__smultb", + "__smulbt", + "__smultt", + "__smulwb", + "__smulwt", + "__qadd", + "__qsub", + "__qdbl", + "__smlabb", + "__smlabt", + "__smlatb", + "__smlatt", + "__smlawb", + "__smlawt", + "__qadd8", + "__qsub8", + "__qsub16", + "__qadd16", + "__qasx", + "__qsax", + "__sadd16", + "__sadd8", + "__smlad", + "__smlsd", + "__sasx", + "__sel", + "__shadd8", + "__shadd16", + "__shsub8", + "__usub8", + "__ssub8", + "__shsub16", + "__smuad", + "__smuadx", + "__smusd", + "__smusdx", + "__usad8", + "__usada8", + "__ldrex", + "__strex", + "__ldrexb", + "__strexb", + "__ldrexh", + "__strexh", + "__clrex", + "__dbg", + "__crc32cd", + "__crc32d", + "__jcvt", + "vabal_high_s8", + "vabal_high_s16", + "vabal_high_s32", + "vabal_high_u8", + "vabal_high_u16", + "vabal_high_u32", + "vabd_f64", + "vabdq_f64", + "vabdd_f64", + "vabds_f32", + "vabdh_f16", + "vabdl_high_s16", + "vabdl_high_s32", + "vabdl_high_s8", + "vabdl_high_u8", + "vabdl_high_u16", + "vabdl_high_u32", + "vabs_f64", + "vabsq_f64", + "vabs_s64", + "vabsq_s64", + "vabsd_s64", + "vaddlv_s16", + "vaddlvq_s16", + "vaddlvq_s32", + "vaddlv_s32", + "vaddlv_s8", + "vaddlvq_s8", + "vaddlv_u16", + "vaddlvq_u16", + "vaddlvq_u32", + "vaddlv_u32", + "vaddlv_u8", + "vaddlvq_u8", + "vaddv_f32", + "vaddvq_f32", + "vaddvq_f64", + "vaddv_s32", + "vaddv_s8", + "vaddvq_s8", + "vaddv_s16", + "vaddvq_s16", + "vaddvq_s32", + "vaddv_u32", + "vaddv_u8", + "vaddvq_u8", + "vaddv_u16", + "vaddvq_u16", + "vaddvq_u32", + "vaddvq_s64", + "vaddvq_u64", + "vamax_f16", + "vamaxq_f16", + "vamax_f32", + "vamaxq_f32", + "vamaxq_f64", + "vamin_f16", + "vaminq_f16", + "vamin_f32", + "vaminq_f32", + "vaminq_f64", + "vbcaxq_s8", + "vbcaxq_s16", + "vbcaxq_s32", + "vbcaxq_s64", + "vbcaxq_u8", + "vbcaxq_u16", + "vbcaxq_u32", + "vbcaxq_u64", + "vcadd_rot270_f16", + "vcaddq_rot270_f16", + "vcadd_rot270_f32", + "vcaddq_rot270_f32", + "vcaddq_rot270_f64", + "vcadd_rot90_f16", + "vcaddq_rot90_f16", + "vcadd_rot90_f32", + "vcaddq_rot90_f32", + "vcaddq_rot90_f64", + "vcage_f64", + "vcageq_f64", + "vcaged_f64", + "vcages_f32", + "vcageh_f16", + "vcagt_f64", + "vcagtq_f64", + "vcagtd_f64", + "vcagts_f32", + "vcagth_f16", + "vcale_f64", + "vcaleq_f64", + "vcaled_f64", + "vcales_f32", + "vcaleh_f16", + "vcalt_f64", + "vcaltq_f64", + "vcaltd_f64", + "vcalts_f32", + "vcalth_f16", + "vceq_f64", + "vceqq_f64", + "vceq_s64", + "vceqq_s64", + "vceq_u64", + "vceqq_u64", + "vceq_p64", + "vceqq_p64", + "vceqd_f64", + "vceqs_f32", + "vceqd_s64", + "vceqd_u64", + "vceqh_f16", + "vceqz_f16", + "vceqzq_f16", + "vceqz_f32", + "vceqzq_f32", + "vceqz_f64", + "vceqzq_f64", + "vceqz_s8", + "vceqzq_s8", + "vceqz_s16", + "vceqzq_s16", + "vceqz_s32", + "vceqzq_s32", + "vceqz_s64", + "vceqzq_s64", + "vceqz_p8", + "vceqzq_p8", + "vceqz_p64", + "vceqzq_p64", + "vceqz_u8", + "vceqzq_u8", + "vceqz_u16", + "vceqzq_u16", + "vceqz_u32", + "vceqzq_u32", + "vceqz_u64", + "vceqzq_u64", + "vceqzd_s64", + "vceqzd_u64", + "vceqzh_f16", + "vceqzs_f32", + "vceqzd_f64", + "vcge_f64", + "vcgeq_f64", + "vcge_s64", + "vcgeq_s64", + "vcge_u64", + "vcgeq_u64", + "vcged_f64", + "vcges_f32", + "vcged_s64", + "vcged_u64", + "vcgeh_f16", + "vcgez_f32", + "vcgezq_f32", + "vcgez_f64", + "vcgezq_f64", + "vcgez_s8", + "vcgezq_s8", + "vcgez_s16", + "vcgezq_s16", + "vcgez_s32", + "vcgezq_s32", + "vcgez_s64", + "vcgezq_s64", + "vcgezd_f64", + "vcgezs_f32", + "vcgezd_s64", + "vcgezh_f16", + "vcgt_f64", + "vcgtq_f64", + "vcgt_s64", + "vcgtq_s64", + "vcgt_u64", + "vcgtq_u64", + "vcgtd_f64", + "vcgts_f32", + "vcgtd_s64", + "vcgtd_u64", + "vcgth_f16", + "vcgtz_f32", + "vcgtzq_f32", + "vcgtz_f64", + "vcgtzq_f64", + "vcgtz_s8", + "vcgtzq_s8", + "vcgtz_s16", + "vcgtzq_s16", + "vcgtz_s32", + "vcgtzq_s32", + "vcgtz_s64", + "vcgtzq_s64", + "vcgtzd_f64", + "vcgtzs_f32", + "vcgtzd_s64", + "vcgtzh_f16", + "vcle_f64", + "vcleq_f64", + "vcle_s64", + "vcleq_s64", + "vcle_u64", + "vcleq_u64", + "vcled_f64", + "vcles_f32", + "vcled_u64", + "vcled_s64", + "vcleh_f16", + "vclez_f32", + "vclezq_f32", + "vclez_f64", + "vclezq_f64", + "vclez_s8", + "vclezq_s8", + "vclez_s16", + "vclezq_s16", + "vclez_s32", + "vclezq_s32", + "vclez_s64", + "vclezq_s64", + "vclezd_f64", + "vclezs_f32", + "vclezd_s64", + "vclezh_f16", + "vclt_f64", + "vcltq_f64", + "vclt_s64", + "vcltq_s64", + "vclt_u64", + "vcltq_u64", + "vcltd_u64", + "vcltd_s64", + "vclth_f16", + "vclts_f32", + "vcltd_f64", + "vcltz_f32", + "vcltzq_f32", + "vcltz_f64", + "vcltzq_f64", + "vcltz_s8", + "vcltzq_s8", + "vcltz_s16", + "vcltzq_s16", + "vcltz_s32", + "vcltzq_s32", + "vcltz_s64", + "vcltzq_s64", + "vcltzd_f64", + "vcltzs_f32", + "vcltzd_s64", + "vcltzh_f16", + "vcmla_f16", + "vcmlaq_f16", + "vcmla_f32", + "vcmlaq_f32", + "vcmlaq_f64", + "vcmla_lane_f16", + "vcmlaq_lane_f16", + "vcmla_lane_f32", + "vcmlaq_lane_f32", + "vcmla_laneq_f16", + "vcmlaq_laneq_f16", + "vcmla_laneq_f32", + "vcmlaq_laneq_f32", + "vcmla_rot180_f16", + "vcmlaq_rot180_f16", + "vcmla_rot180_f32", + "vcmlaq_rot180_f32", + "vcmlaq_rot180_f64", + "vcmla_rot180_lane_f16", + "vcmlaq_rot180_lane_f16", + "vcmla_rot180_lane_f32", + "vcmlaq_rot180_lane_f32", + "vcmla_rot180_laneq_f16", + "vcmlaq_rot180_laneq_f16", + "vcmla_rot180_laneq_f32", + "vcmlaq_rot180_laneq_f32", + "vcmla_rot270_f16", + "vcmlaq_rot270_f16", + "vcmla_rot270_f32", + "vcmlaq_rot270_f32", + "vcmlaq_rot270_f64", + "vcmla_rot270_lane_f16", + "vcmlaq_rot270_lane_f16", + "vcmla_rot270_lane_f32", + "vcmlaq_rot270_lane_f32", + "vcmla_rot270_laneq_f16", + "vcmlaq_rot270_laneq_f16", + "vcmla_rot270_laneq_f32", + "vcmlaq_rot270_laneq_f32", + "vcmla_rot90_f16", + "vcmlaq_rot90_f16", + "vcmla_rot90_f32", + "vcmlaq_rot90_f32", + "vcmlaq_rot90_f64", + "vcmla_rot90_lane_f16", + "vcmlaq_rot90_lane_f16", + "vcmla_rot90_lane_f32", + "vcmlaq_rot90_lane_f32", + "vcmla_rot90_laneq_f16", + "vcmlaq_rot90_laneq_f16", + "vcmla_rot90_laneq_f32", + "vcmlaq_rot90_laneq_f32", + "vcopy_lane_f32", + "vcopy_lane_s8", + "vcopy_lane_s16", + "vcopy_lane_s32", + "vcopy_lane_u8", + "vcopy_lane_u16", + "vcopy_lane_u32", + "vcopy_lane_p8", + "vcopy_lane_p16", + "vcopy_laneq_f32", + "vcopy_laneq_s8", + "vcopy_laneq_s16", + "vcopy_laneq_s32", + "vcopy_laneq_u8", + "vcopy_laneq_u16", + "vcopy_laneq_u32", + "vcopy_laneq_p8", + "vcopy_laneq_p16", + "vcopyq_lane_f32", + "vcopyq_lane_f64", + "vcopyq_lane_s64", + "vcopyq_lane_u64", + "vcopyq_lane_p64", + "vcopyq_lane_s8", + "vcopyq_lane_s16", + "vcopyq_lane_s32", + "vcopyq_lane_u8", + "vcopyq_lane_u16", + "vcopyq_lane_u32", + "vcopyq_lane_p8", + "vcopyq_lane_p16", + "vcopyq_laneq_f32", + "vcopyq_laneq_f64", + "vcopyq_laneq_s8", + "vcopyq_laneq_s16", + "vcopyq_laneq_s32", + "vcopyq_laneq_s64", + "vcopyq_laneq_u8", + "vcopyq_laneq_u16", + "vcopyq_laneq_u32", + "vcopyq_laneq_u64", + "vcopyq_laneq_p8", + "vcopyq_laneq_p16", + "vcopyq_laneq_p64", + "vcreate_f64", + "vcvt_f32_f64", + "vcvt_f64_f32", + "vcvt_f64_s64", + "vcvtq_f64_s64", + "vcvt_f64_u64", + "vcvtq_f64_u64", + "vcvt_high_f16_f32", + "vcvt_high_f32_f16", + "vcvt_high_f32_f64", + "vcvt_high_f64_f32", + "vcvt_n_f64_s64", + "vcvtq_n_f64_s64", + "vcvt_n_f64_u64", + "vcvtq_n_f64_u64", + "vcvt_n_s64_f64", + "vcvtq_n_s64_f64", + "vcvt_n_u64_f64", + "vcvtq_n_u64_f64", + "vcvt_s64_f64", + "vcvtq_s64_f64", + "vcvt_u64_f64", + "vcvtq_u64_f64", + "vcvta_s16_f16", + "vcvtaq_s16_f16", + "vcvta_s32_f32", + "vcvtaq_s32_f32", + "vcvta_s64_f64", + "vcvtaq_s64_f64", + "vcvta_u16_f16", + "vcvtaq_u16_f16", + "vcvta_u32_f32", + "vcvtaq_u32_f32", + "vcvta_u64_f64", + "vcvtaq_u64_f64", + "vcvtah_s16_f16", + "vcvtah_s32_f16", + "vcvtah_s64_f16", + "vcvtah_u16_f16", + "vcvtah_u32_f16", + "vcvtah_u64_f16", + "vcvtas_s32_f32", + "vcvtad_s64_f64", + "vcvtas_u32_f32", + "vcvtad_u64_f64", + "vcvtd_f64_s64", + "vcvts_f32_s32", + "vcvth_f16_s16", + "vcvth_f16_s32", + "vcvth_f16_s64", + "vcvth_f16_u16", + "vcvth_f16_u32", + "vcvth_f16_u64", + "vcvth_n_f16_s16", + "vcvth_n_f16_s32", + "vcvth_n_f16_s64", + "vcvth_n_f16_u16", + "vcvth_n_f16_u32", + "vcvth_n_f16_u64", + "vcvth_n_s16_f16", + "vcvth_n_s32_f16", + "vcvth_n_s64_f16", + "vcvth_n_u16_f16", + "vcvth_n_u32_f16", + "vcvth_n_u64_f16", + "vcvth_s16_f16", + "vcvth_s32_f16", + "vcvth_s64_f16", + "vcvth_u16_f16", + "vcvth_u32_f16", + "vcvth_u64_f16", + "vcvtm_s16_f16", + "vcvtmq_s16_f16", + "vcvtm_s32_f32", + "vcvtmq_s32_f32", + "vcvtm_s64_f64", + "vcvtmq_s64_f64", + "vcvtm_u16_f16", + "vcvtmq_u16_f16", + "vcvtm_u32_f32", + "vcvtmq_u32_f32", + "vcvtm_u64_f64", + "vcvtmq_u64_f64", + "vcvtmh_s16_f16", + "vcvtmh_s32_f16", + "vcvtmh_s64_f16", + "vcvtmh_u16_f16", + "vcvtmh_u32_f16", + "vcvtmh_u64_f16", + "vcvtms_s32_f32", + "vcvtmd_s64_f64", + "vcvtms_u32_f32", + "vcvtmd_u64_f64", + "vcvtn_s16_f16", + "vcvtnq_s16_f16", + "vcvtn_s32_f32", + "vcvtnq_s32_f32", + "vcvtn_s64_f64", + "vcvtnq_s64_f64", + "vcvtn_u16_f16", + "vcvtnq_u16_f16", + "vcvtn_u32_f32", + "vcvtnq_u32_f32", + "vcvtn_u64_f64", + "vcvtnq_u64_f64", + "vcvtnh_s16_f16", + "vcvtnh_s32_f16", + "vcvtnh_s64_f16", + "vcvtnh_u16_f16", + "vcvtnh_u32_f16", + "vcvtnh_u64_f16", + "vcvtns_s32_f32", + "vcvtnd_s64_f64", + "vcvtns_u32_f32", + "vcvtnd_u64_f64", + "vcvtp_s16_f16", + "vcvtpq_s16_f16", + "vcvtp_s32_f32", + "vcvtpq_s32_f32", + "vcvtp_s64_f64", + "vcvtpq_s64_f64", + "vcvtp_u16_f16", + "vcvtpq_u16_f16", + "vcvtp_u32_f32", + "vcvtpq_u32_f32", + "vcvtp_u64_f64", + "vcvtpq_u64_f64", + "vcvtph_s16_f16", + "vcvtph_s32_f16", + "vcvtph_s64_f16", + "vcvtph_u16_f16", + "vcvtph_u32_f16", + "vcvtph_u64_f16", + "vcvtps_s32_f32", + "vcvtpd_s64_f64", + "vcvtps_u32_f32", + "vcvtpd_u64_f64", + "vcvts_f32_u32", + "vcvtd_f64_u64", + "vcvts_n_f32_s32", + "vcvtd_n_f64_s64", + "vcvts_n_f32_u32", + "vcvtd_n_f64_u64", + "vcvts_n_s32_f32", + "vcvtd_n_s64_f64", + "vcvts_n_u32_f32", + "vcvtd_n_u64_f64", + "vcvts_s32_f32", + "vcvtd_s64_f64", + "vcvts_u32_f32", + "vcvtd_u64_f64", + "vcvtx_f32_f64", + "vcvtx_high_f32_f64", + "vcvtxd_f32_f64", + "vdiv_f16", + "vdivq_f16", + "vdiv_f32", + "vdivq_f32", + "vdiv_f64", + "vdivq_f64", + "vdivh_f16", + "vdup_lane_f64", + "vdup_lane_p64", + "vdup_laneq_f64", + "vdup_laneq_p64", + "vdupb_lane_s8", + "vduph_laneq_s16", + "vdupb_lane_u8", + "vduph_laneq_u16", + "vdupb_lane_p8", + "vduph_laneq_p16", + "vdupb_laneq_s8", + "vdupb_laneq_u8", + "vdupb_laneq_p8", + "vdupd_lane_f64", + "vdupd_lane_s64", + "vdupd_lane_u64", + "vduph_lane_f16", + "vduph_laneq_f16", + "vdupq_lane_f64", + "vdupq_lane_p64", + "vdupq_laneq_f64", + "vdupq_laneq_p64", + "vdups_lane_f32", + "vdupd_laneq_f64", + "vdups_lane_s32", + "vdupd_laneq_s64", + "vdups_lane_u32", + "vdupd_laneq_u64", + "vdups_laneq_f32", + "vduph_lane_s16", + "vdups_laneq_s32", + "vduph_lane_u16", + "vdups_laneq_u32", + "vduph_lane_p16", + "veor3q_s8", + "veor3q_s16", + "veor3q_s32", + "veor3q_s64", + "veor3q_u8", + "veor3q_u16", + "veor3q_u32", + "veor3q_u64", + "vextq_f64", + "vextq_p64", + "vfma_f64", + "vfma_lane_f16", + "vfma_laneq_f16", + "vfmaq_lane_f16", + "vfmaq_laneq_f16", + "vfma_lane_f32", + "vfma_laneq_f32", + "vfmaq_lane_f32", + "vfmaq_laneq_f32", + "vfmaq_laneq_f64", + "vfma_lane_f64", + "vfma_laneq_f64", + "vfma_n_f16", + "vfmaq_n_f16", + "vfma_n_f64", + "vfmad_lane_f64", + "vfmah_f16", + "vfmah_lane_f16", + "vfmah_laneq_f16", + "vfmaq_f64", + "vfmaq_lane_f64", + "vfmaq_n_f64", + "vfmas_lane_f32", + "vfmas_laneq_f32", + "vfmad_laneq_f64", + "vfmlal_high_f16", + "vfmlalq_high_f16", + "vfmlal_lane_high_f16", + "vfmlal_laneq_high_f16", + "vfmlalq_lane_high_f16", + "vfmlalq_laneq_high_f16", + "vfmlal_lane_low_f16", + "vfmlal_laneq_low_f16", + "vfmlalq_lane_low_f16", + "vfmlalq_laneq_low_f16", + "vfmlal_low_f16", + "vfmlalq_low_f16", + "vfmlsl_high_f16", + "vfmlslq_high_f16", + "vfmlsl_lane_high_f16", + "vfmlsl_laneq_high_f16", + "vfmlslq_lane_high_f16", + "vfmlslq_laneq_high_f16", + "vfmlsl_lane_low_f16", + "vfmlsl_laneq_low_f16", + "vfmlslq_lane_low_f16", + "vfmlslq_laneq_low_f16", + "vfmlsl_low_f16", + "vfmlslq_low_f16", + "vfms_f64", + "vfms_lane_f16", + "vfms_laneq_f16", + "vfmsq_lane_f16", + "vfmsq_laneq_f16", + "vfms_lane_f32", + "vfms_laneq_f32", + "vfmsq_lane_f32", + "vfmsq_laneq_f32", + "vfmsq_laneq_f64", + "vfms_lane_f64", + "vfms_laneq_f64", + "vfms_n_f16", + "vfmsq_n_f16", + "vfms_n_f64", + "vfmsh_f16", + "vfmsh_lane_f16", + "vfmsh_laneq_f16", + "vfmsq_f64", + "vfmsq_lane_f64", + "vfmsq_n_f64", + "vfmss_lane_f32", + "vfmss_laneq_f32", + "vfmsd_lane_f64", + "vfmsd_laneq_f64", + "vld1_f16", + "vld1q_f16", + "vld1_f64_x2", + "vld1_f64_x3", + "vld1_f64_x4", + "vld1q_f64_x2", + "vld1q_f64_x3", + "vld1q_f64_x4", + "vld2_dup_f64", + "vld2q_dup_f64", + "vld2q_dup_s64", + "vld2_f64", + "vld2_lane_f64", + "vld2_lane_s64", + "vld2_lane_p64", + "vld2_lane_u64", + "vld2q_dup_p64", + "vld2q_dup_p64", + "vld2q_dup_u64", + "vld2q_dup_u64", + "vld2q_f64", + "vld2q_s64", + "vld2q_lane_f64", + "vld2q_lane_s8", + "vld2q_lane_s64", + "vld2q_lane_p64", + "vld2q_lane_u8", + "vld2q_lane_u64", + "vld2q_lane_p8", + "vld2q_p64", + "vld2q_p64", + "vld2q_u64", + "vld3_dup_f64", + "vld3q_dup_f64", + "vld3q_dup_s64", + "vld3_f64", + "vld3_lane_f64", + "vld3_lane_p64", + "vld3_lane_s64", + "vld3_lane_u64", + "vld3q_dup_p64", + "vld3q_dup_p64", + "vld3q_dup_u64", + "vld3q_dup_u64", + "vld3q_f64", + "vld3q_s64", + "vld3q_lane_f64", + "vld3q_lane_p64", + "vld3q_lane_s8", + "vld3q_lane_s64", + "vld3q_lane_u8", + "vld3q_lane_u64", + "vld3q_lane_p8", + "vld3q_p64", + "vld3q_p64", + "vld3q_u64", + "vld4_dup_f64", + "vld4q_dup_f64", + "vld4q_dup_s64", + "vld4_f64", + "vld4_lane_f64", + "vld4_lane_s64", + "vld4_lane_p64", + "vld4_lane_u64", + "vld4q_dup_p64", + "vld4q_dup_p64", + "vld4q_dup_u64", + "vld4q_dup_u64", + "vld4q_f64", + "vld4q_s64", + "vld4q_lane_f64", + "vld4q_lane_s8", + "vld4q_lane_s64", + "vld4q_lane_p64", + "vld4q_lane_u8", + "vld4q_lane_u64", + "vld4q_lane_p8", + "vld4q_p64", + "vld4q_p64", + "vld4q_u64", + "vldap1_lane_s64", + "vldap1q_lane_s64", + "vldap1q_lane_f64", + "vldap1_lane_u64", + "vldap1q_lane_u64", + "vldap1_lane_p64", + "vldap1q_lane_p64", + "vluti2_lane_f16", + "vluti2q_lane_f16", + "vluti2_lane_u8", + "vluti2q_lane_u8", + "vluti2_lane_u16", + "vluti2q_lane_u16", + "vluti2_lane_p8", + "vluti2q_lane_p8", + "vluti2_lane_p16", + "vluti2q_lane_p16", + "vluti2_lane_s8", + "vluti2q_lane_s8", + "vluti2_lane_s16", + "vluti2q_lane_s16", + "vluti2_laneq_f16", + "vluti2q_laneq_f16", + "vluti2_laneq_u8", + "vluti2q_laneq_u8", + "vluti2_laneq_u16", + "vluti2q_laneq_u16", + "vluti2_laneq_p8", + "vluti2q_laneq_p8", + "vluti2_laneq_p16", + "vluti2q_laneq_p16", + "vluti2_laneq_s8", + "vluti2q_laneq_s8", + "vluti2_laneq_s16", + "vluti2q_laneq_s16", + "vluti4q_lane_f16_x2", + "vluti4q_lane_u16_x2", + "vluti4q_lane_p16_x2", + "vluti4q_lane_s16_x2", + "vluti4q_lane_s8", + "vluti4q_lane_u8", + "vluti4q_lane_p8", + "vluti4q_laneq_f16_x2", + "vluti4q_laneq_u16_x2", + "vluti4q_laneq_p16_x2", + "vluti4q_laneq_s16_x2", + "vluti4q_laneq_s8", + "vluti4q_laneq_u8", + "vluti4q_laneq_p8", + "vmax_f64", + "vmaxq_f64", + "vmaxh_f16", + "vmaxnm_f64", + "vmaxnmq_f64", + "vmaxnmh_f16", + "vmaxnmv_f16", + "vmaxnmvq_f16", + "vmaxnmv_f32", + "vmaxnmvq_f64", + "vmaxnmvq_f32", + "vmaxv_f16", + "vmaxvq_f16", + "vmaxv_f32", + "vmaxvq_f32", + "vmaxvq_f64", + "vmaxv_s8", + "vmaxvq_s8", + "vmaxv_s16", + "vmaxvq_s16", + "vmaxv_s32", + "vmaxvq_s32", + "vmaxv_u8", + "vmaxvq_u8", + "vmaxv_u16", + "vmaxvq_u16", + "vmaxv_u32", + "vmaxvq_u32", + "vmin_f64", + "vminq_f64", + "vminh_f16", + "vminnm_f64", + "vminnmq_f64", + "vminnmh_f16", + "vminnmv_f16", + "vminnmvq_f16", + "vminnmv_f32", + "vminnmvq_f64", + "vminnmvq_f32", + "vminv_f16", + "vminvq_f16", + "vminv_f32", + "vminvq_f32", + "vminvq_f64", + "vminv_s8", + "vminvq_s8", + "vminv_s16", + "vminvq_s16", + "vminv_s32", + "vminvq_s32", + "vminv_u8", + "vminvq_u8", + "vminv_u16", + "vminvq_u16", + "vminv_u32", + "vminvq_u32", + "vmla_f64", + "vmlaq_f64", + "vmlal_high_lane_s16", + "vmlal_high_laneq_s16", + "vmlal_high_lane_s32", + "vmlal_high_laneq_s32", + "vmlal_high_lane_u16", + "vmlal_high_laneq_u16", + "vmlal_high_lane_u32", + "vmlal_high_laneq_u32", + "vmlal_high_n_s16", + "vmlal_high_n_s32", + "vmlal_high_n_u16", + "vmlal_high_n_u32", + "vmlal_high_s8", + "vmlal_high_s16", + "vmlal_high_s32", + "vmlal_high_u8", + "vmlal_high_u16", + "vmlal_high_u32", + "vmls_f64", + "vmlsq_f64", + "vmlsl_high_lane_s16", + "vmlsl_high_laneq_s16", + "vmlsl_high_lane_s32", + "vmlsl_high_laneq_s32", + "vmlsl_high_lane_u16", + "vmlsl_high_laneq_u16", + "vmlsl_high_lane_u32", + "vmlsl_high_laneq_u32", + "vmlsl_high_n_s16", + "vmlsl_high_n_s32", + "vmlsl_high_n_u16", + "vmlsl_high_n_u32", + "vmlsl_high_s8", + "vmlsl_high_s16", + "vmlsl_high_s32", + "vmlsl_high_u8", + "vmlsl_high_u16", + "vmlsl_high_u32", + "vmovl_high_s8", + "vmovl_high_s16", + "vmovl_high_s32", + "vmovl_high_u8", + "vmovl_high_u16", + "vmovl_high_u32", + "vmovn_high_s16", + "vmovn_high_s32", + "vmovn_high_s64", + "vmovn_high_u16", + "vmovn_high_u32", + "vmovn_high_u64", + "vmul_f64", + "vmulq_f64", + "vmul_lane_f64", + "vmul_laneq_f16", + "vmulq_laneq_f16", + "vmul_laneq_f64", + "vmul_n_f64", + "vmulq_n_f64", + "vmuld_lane_f64", + "vmulh_f16", + "vmulh_lane_f16", + "vmulh_laneq_f16", + "vmull_high_lane_s16", + "vmull_high_laneq_s16", + "vmull_high_lane_s32", + "vmull_high_laneq_s32", + "vmull_high_lane_u16", + "vmull_high_laneq_u16", + "vmull_high_lane_u32", + "vmull_high_laneq_u32", + "vmull_high_n_s16", + "vmull_high_n_s32", + "vmull_high_n_u16", + "vmull_high_n_u32", + "vmull_high_p64", + "vmull_high_p8", + "vmull_high_s8", + "vmull_high_s16", + "vmull_high_s32", + "vmull_high_u8", + "vmull_high_u16", + "vmull_high_u32", + "vmull_p64", + "vmulq_lane_f64", + "vmulq_laneq_f64", + "vmuls_lane_f32", + "vmuls_laneq_f32", + "vmuld_laneq_f64", + "vmulx_f16", + "vmulxq_f16", + "vmulx_f32", + "vmulxq_f32", + "vmulx_f64", + "vmulxq_f64", + "vmulx_lane_f16", + "vmulx_laneq_f16", + "vmulxq_lane_f16", + "vmulxq_laneq_f16", + "vmulx_lane_f32", + "vmulx_laneq_f32", + "vmulxq_lane_f32", + "vmulxq_laneq_f32", + "vmulxq_laneq_f64", + "vmulx_lane_f64", + "vmulx_laneq_f64", + "vmulx_n_f16", + "vmulxq_n_f16", + "vmulxd_f64", + "vmulxs_f32", + "vmulxd_lane_f64", + "vmulxd_laneq_f64", + "vmulxs_lane_f32", + "vmulxs_laneq_f32", + "vmulxh_f16", + "vmulxh_lane_f16", + "vmulxh_laneq_f16", + "vmulxq_lane_f64", + "vneg_f64", + "vnegq_f64", + "vneg_s64", + "vnegq_s64", + "vnegd_s64", + "vnegh_f16", + "vpaddd_f64", + "vpadds_f32", + "vpaddd_s64", + "vpaddd_u64", + "vpaddq_f16", + "vpaddq_f32", + "vpaddq_f64", + "vpaddq_s8", + "vpaddq_s16", + "vpaddq_s32", + "vpaddq_s64", + "vpaddq_u8", + "vpaddq_u16", + "vpaddq_u32", + "vpaddq_u64", + "vpmax_f16", + "vpmaxq_f16", + "vpmaxnm_f16", + "vpmaxnmq_f16", + "vpmaxnm_f32", + "vpmaxnmq_f32", + "vpmaxnmq_f64", + "vpmaxnmqd_f64", + "vpmaxnms_f32", + "vpmaxq_s8", + "vpmaxq_s16", + "vpmaxq_s32", + "vpmaxq_u8", + "vpmaxq_u16", + "vpmaxq_u32", + "vpmaxqd_f64", + "vpmaxs_f32", + "vpmin_f16", + "vpminq_f16", + "vpminnm_f16", + "vpminnmq_f16", + "vpminnm_f32", + "vpminnmq_f32", + "vpminnmq_f64", + "vpminnmqd_f64", + "vpminnms_f32", + "vpminq_s8", + "vpminq_s16", + "vpminq_s32", + "vpminq_u8", + "vpminq_u16", + "vpminq_u32", + "vpminqd_f64", + "vpmins_f32", + "vqabs_s64", + "vqabsq_s64", + "vqabsb_s8", + "vqabsh_s16", + "vqabss_s32", + "vqabsd_s64", + "vqaddb_s8", + "vqaddh_s16", + "vqaddb_u8", + "vqaddh_u16", + "vqadds_s32", + "vqaddd_s64", + "vqadds_u32", + "vqaddd_u64", + "vqdmlal_high_lane_s16", + "vqdmlal_high_laneq_s16", + "vqdmlal_high_lane_s32", + "vqdmlal_high_laneq_s32", + "vqdmlal_high_n_s16", + "vqdmlal_high_s16", + "vqdmlal_high_n_s32", + "vqdmlal_high_s32", + "vqdmlal_laneq_s16", + "vqdmlal_laneq_s32", + "vqdmlalh_lane_s16", + "vqdmlalh_laneq_s16", + "vqdmlals_lane_s32", + "vqdmlals_laneq_s32", + "vqdmlalh_s16", + "vqdmlals_s32", + "vqdmlsl_high_lane_s16", + "vqdmlsl_high_laneq_s16", + "vqdmlsl_high_lane_s32", + "vqdmlsl_high_laneq_s32", + "vqdmlsl_high_n_s16", + "vqdmlsl_high_s16", + "vqdmlsl_high_n_s32", + "vqdmlsl_high_s32", + "vqdmlsl_laneq_s16", + "vqdmlsl_laneq_s32", + "vqdmlslh_lane_s16", + "vqdmlslh_laneq_s16", + "vqdmlsls_lane_s32", + "vqdmlsls_laneq_s32", + "vqdmlslh_s16", + "vqdmlsls_s32", + "vqdmulh_lane_s16", + "vqdmulhq_lane_s16", + "vqdmulh_lane_s32", + "vqdmulhq_lane_s32", + "vqdmulhh_lane_s16", + "vqdmulhh_laneq_s16", + "vqdmulhh_s16", + "vqdmulhs_s32", + "vqdmulhs_lane_s32", + "vqdmulhs_laneq_s32", + "vqdmull_high_lane_s16", + "vqdmull_high_laneq_s32", + "vqdmull_high_lane_s32", + "vqdmull_high_laneq_s16", + "vqdmull_high_n_s16", + "vqdmull_high_n_s32", + "vqdmull_high_s16", + "vqdmull_high_s32", + "vqdmull_laneq_s16", + "vqdmull_laneq_s32", + "vqdmullh_lane_s16", + "vqdmulls_laneq_s32", + "vqdmullh_laneq_s16", + "vqdmullh_s16", + "vqdmulls_lane_s32", + "vqdmulls_s32", + "vqmovn_high_s16", + "vqmovn_high_s32", + "vqmovn_high_s64", + "vqmovn_high_u16", + "vqmovn_high_u32", + "vqmovn_high_u64", + "vqmovnd_s64", + "vqmovnd_u64", + "vqmovnh_s16", + "vqmovns_s32", + "vqmovnh_u16", + "vqmovns_u32", + "vqmovun_high_s16", + "vqmovun_high_s32", + "vqmovun_high_s64", + "vqmovunh_s16", + "vqmovuns_s32", + "vqmovund_s64", + "vqneg_s64", + "vqnegq_s64", + "vqnegb_s8", + "vqnegh_s16", + "vqnegs_s32", + "vqnegd_s64", + "vqrdmlah_lane_s16", + "vqrdmlah_lane_s32", + "vqrdmlah_laneq_s16", + "vqrdmlah_laneq_s32", + "vqrdmlahq_lane_s16", + "vqrdmlahq_lane_s32", + "vqrdmlahq_laneq_s16", + "vqrdmlahq_laneq_s32", + "vqrdmlah_s16", + "vqrdmlahq_s16", + "vqrdmlah_s32", + "vqrdmlahq_s32", + "vqrdmlahh_lane_s16", + "vqrdmlahh_laneq_s16", + "vqrdmlahs_lane_s32", + "vqrdmlahs_laneq_s32", + "vqrdmlahh_s16", + "vqrdmlahs_s32", + "vqrdmlsh_lane_s16", + "vqrdmlsh_lane_s32", + "vqrdmlsh_laneq_s16", + "vqrdmlsh_laneq_s32", + "vqrdmlshq_lane_s16", + "vqrdmlshq_lane_s32", + "vqrdmlshq_laneq_s16", + "vqrdmlshq_laneq_s32", + "vqrdmlsh_s16", + "vqrdmlshq_s16", + "vqrdmlsh_s32", + "vqrdmlshq_s32", + "vqrdmlshh_lane_s16", + "vqrdmlshh_laneq_s16", + "vqrdmlshs_lane_s32", + "vqrdmlshs_laneq_s32", + "vqrdmlshh_s16", + "vqrdmlshs_s32", + "vqrdmulhh_lane_s16", + "vqrdmulhh_laneq_s16", + "vqrdmulhs_lane_s32", + "vqrdmulhs_laneq_s32", + "vqrdmulhh_s16", + "vqrdmulhs_s32", + "vqrshlb_s8", + "vqrshlh_s16", + "vqrshlb_u8", + "vqrshlh_u16", + "vqrshld_s64", + "vqrshls_s32", + "vqrshls_u32", + "vqrshld_u64", + "vqrshrn_high_n_s16", + "vqrshrn_high_n_s32", + "vqrshrn_high_n_s64", + "vqrshrn_high_n_u16", + "vqrshrn_high_n_u32", + "vqrshrn_high_n_u64", + "vqrshrnd_n_u64", + "vqrshrnh_n_u16", + "vqrshrns_n_u32", + "vqrshrnh_n_s16", + "vqrshrns_n_s32", + "vqrshrnd_n_s64", + "vqrshrun_high_n_s16", + "vqrshrun_high_n_s32", + "vqrshrun_high_n_s64", + "vqrshrund_n_s64", + "vqrshrunh_n_s16", + "vqrshruns_n_s32", + "vqshlb_n_s8", + "vqshld_n_s64", + "vqshlh_n_s16", + "vqshls_n_s32", + "vqshlb_n_u8", + "vqshld_n_u64", + "vqshlh_n_u16", + "vqshls_n_u32", + "vqshlb_s8", + "vqshlh_s16", + "vqshls_s32", + "vqshlb_u8", + "vqshlh_u16", + "vqshls_u32", + "vqshld_s64", + "vqshld_u64", + "vqshlub_n_s8", + "vqshlud_n_s64", + "vqshluh_n_s16", + "vqshlus_n_s32", + "vqshrn_high_n_s16", + "vqshrn_high_n_s32", + "vqshrn_high_n_s64", + "vqshrn_high_n_u16", + "vqshrn_high_n_u32", + "vqshrn_high_n_u64", + "vqshrnd_n_s64", + "vqshrnd_n_u64", + "vqshrnh_n_s16", + "vqshrns_n_s32", + "vqshrnh_n_u16", + "vqshrns_n_u32", + "vqshrun_high_n_s16", + "vqshrun_high_n_s32", + "vqshrun_high_n_s64", + "vqshrund_n_s64", + "vqshrunh_n_s16", + "vqshruns_n_s32", + "vqsubb_s8", + "vqsubh_s16", + "vqsubb_u8", + "vqsubh_u16", + "vqsubs_s32", + "vqsubd_s64", + "vqsubs_u32", + "vqsubd_u64", + "vrax1q_u64", + "vrbit_s8", + "vrbitq_s8", + "vrbit_u8", + "vrbit_u8", + "vrbitq_u8", + "vrbitq_u8", + "vrbit_p8", + "vrbit_p8", + "vrbitq_p8", + "vrbitq_p8", + "vrecpe_f64", + "vrecpeq_f64", + "vrecped_f64", + "vrecpes_f32", + "vrecpeh_f16", + "vrecps_f64", + "vrecpsq_f64", + "vrecpsd_f64", + "vrecpss_f32", + "vrecpsh_f16", + "vrecpxd_f64", + "vrecpxs_f32", + "vrecpxh_f16", + "vreinterpret_f64_f16", + "vreinterpret_f64_f16", + "vreinterpretq_f64_f16", + "vreinterpretq_f64_f16", + "vreinterpret_f16_f64", + "vreinterpret_f16_f64", + "vreinterpretq_f16_f64", + "vreinterpretq_f16_f64", + "vreinterpretq_f64_p128", + "vreinterpretq_f64_p128", + "vreinterpret_f64_f32", + "vreinterpret_f64_f32", + "vreinterpret_p64_f32", + "vreinterpret_p64_f32", + "vreinterpretq_f64_f32", + "vreinterpretq_f64_f32", + "vreinterpretq_p64_f32", + "vreinterpretq_p64_f32", + "vreinterpret_f32_f64", + "vreinterpret_f32_f64", + "vreinterpret_s8_f64", + "vreinterpret_s8_f64", + "vreinterpret_s16_f64", + "vreinterpret_s16_f64", + "vreinterpret_s32_f64", + "vreinterpret_s32_f64", + "vreinterpret_s64_f64", + "vreinterpret_u8_f64", + "vreinterpret_u8_f64", + "vreinterpret_u16_f64", + "vreinterpret_u16_f64", + "vreinterpret_u32_f64", + "vreinterpret_u32_f64", + "vreinterpret_u64_f64", + "vreinterpret_p8_f64", + "vreinterpret_p8_f64", + "vreinterpret_p16_f64", + "vreinterpret_p16_f64", + "vreinterpret_p64_f64", + "vreinterpretq_p128_f64", + "vreinterpretq_p128_f64", + "vreinterpretq_f32_f64", + "vreinterpretq_f32_f64", + "vreinterpretq_s8_f64", + "vreinterpretq_s8_f64", + "vreinterpretq_s16_f64", + "vreinterpretq_s16_f64", + "vreinterpretq_s32_f64", + "vreinterpretq_s32_f64", + "vreinterpretq_s64_f64", + "vreinterpretq_s64_f64", + "vreinterpretq_u8_f64", + "vreinterpretq_u8_f64", + "vreinterpretq_u16_f64", + "vreinterpretq_u16_f64", + "vreinterpretq_u32_f64", + "vreinterpretq_u32_f64", + "vreinterpretq_u64_f64", + "vreinterpretq_u64_f64", + "vreinterpretq_p8_f64", + "vreinterpretq_p8_f64", + "vreinterpretq_p16_f64", + "vreinterpretq_p16_f64", + "vreinterpretq_p64_f64", + "vreinterpretq_p64_f64", + "vreinterpret_f64_s8", + "vreinterpret_f64_s8", + "vreinterpretq_f64_s8", + "vreinterpretq_f64_s8", + "vreinterpret_f64_s16", + "vreinterpret_f64_s16", + "vreinterpretq_f64_s16", + "vreinterpretq_f64_s16", + "vreinterpret_f64_s32", + "vreinterpret_f64_s32", + "vreinterpretq_f64_s32", + "vreinterpretq_f64_s32", + "vreinterpret_f64_s64", + "vreinterpret_p64_s64", + "vreinterpretq_f64_s64", + "vreinterpretq_f64_s64", + "vreinterpretq_p64_s64", + "vreinterpretq_p64_s64", + "vreinterpret_f64_u8", + "vreinterpret_f64_u8", + "vreinterpretq_f64_u8", + "vreinterpretq_f64_u8", + "vreinterpret_f64_u16", + "vreinterpret_f64_u16", + "vreinterpretq_f64_u16", + "vreinterpretq_f64_u16", + "vreinterpret_f64_u32", + "vreinterpret_f64_u32", + "vreinterpretq_f64_u32", + "vreinterpretq_f64_u32", + "vreinterpret_f64_u64", + "vreinterpret_p64_u64", + "vreinterpretq_f64_u64", + "vreinterpretq_f64_u64", + "vreinterpretq_p64_u64", + "vreinterpretq_p64_u64", + "vreinterpret_f64_p8", + "vreinterpret_f64_p8", + "vreinterpretq_f64_p8", + "vreinterpretq_f64_p8", + "vreinterpret_f64_p16", + "vreinterpret_f64_p16", + "vreinterpretq_f64_p16", + "vreinterpretq_f64_p16", + "vreinterpret_f32_p64", + "vreinterpret_f32_p64", + "vreinterpret_f64_p64", + "vreinterpret_s64_p64", + "vreinterpret_u64_p64", + "vreinterpretq_f32_p64", + "vreinterpretq_f32_p64", + "vreinterpretq_f64_p64", + "vreinterpretq_f64_p64", + "vreinterpretq_s64_p64", + "vreinterpretq_s64_p64", + "vreinterpretq_u64_p64", + "vreinterpretq_u64_p64", + "vrnd32x_f32", + "vrnd32xq_f32", + "vrnd32xq_f64", + "vrnd32x_f64", + "vrnd32z_f32", + "vrnd32zq_f32", + "vrnd32zq_f64", + "vrnd32z_f64", + "vrnd64x_f32", + "vrnd64xq_f32", + "vrnd64xq_f64", + "vrnd64x_f64", + "vrnd64z_f32", + "vrnd64zq_f32", + "vrnd64zq_f64", + "vrnd64z_f64", + "vrnd_f16", + "vrndq_f16", + "vrnd_f32", + "vrndq_f32", + "vrnd_f64", + "vrndq_f64", + "vrnda_f16", + "vrndaq_f16", + "vrnda_f32", + "vrndaq_f32", + "vrnda_f64", + "vrndaq_f64", + "vrndah_f16", + "vrndh_f16", + "vrndi_f16", + "vrndiq_f16", + "vrndi_f32", + "vrndiq_f32", + "vrndi_f64", + "vrndiq_f64", + "vrndih_f16", + "vrndm_f16", + "vrndmq_f16", + "vrndm_f32", + "vrndmq_f32", + "vrndm_f64", + "vrndmq_f64", + "vrndmh_f16", + "vrndn_f64", + "vrndnq_f64", + "vrndnh_f16", + "vrndns_f32", + "vrndp_f16", + "vrndpq_f16", + "vrndp_f32", + "vrndpq_f32", + "vrndp_f64", + "vrndpq_f64", + "vrndph_f16", + "vrndx_f16", + "vrndxq_f16", + "vrndx_f32", + "vrndxq_f32", + "vrndx_f64", + "vrndxq_f64", + "vrndxh_f16", + "vrshld_s64", + "vrshld_u64", + "vrshrd_n_s64", + "vrshrd_n_u64", + "vrshrn_high_n_s16", + "vrshrn_high_n_s32", + "vrshrn_high_n_s64", + "vrshrn_high_n_u16", + "vrshrn_high_n_u32", + "vrshrn_high_n_u64", + "vrsqrte_f64", + "vrsqrteq_f64", + "vrsqrted_f64", + "vrsqrtes_f32", + "vrsqrteh_f16", + "vrsqrts_f64", + "vrsqrtsq_f64", + "vrsqrtsd_f64", + "vrsqrtss_f32", + "vrsqrtsh_f16", + "vrsrad_n_s64", + "vrsrad_n_u64", + "vrsubhn_high_s16", + "vrsubhn_high_s32", + "vrsubhn_high_s64", + "vrsubhn_high_u16", + "vrsubhn_high_u32", + "vrsubhn_high_u64", + "vrsubhn_high_s16", + "vrsubhn_high_s32", + "vrsubhn_high_s64", + "vrsubhn_high_u16", + "vrsubhn_high_u32", + "vrsubhn_high_u64", + "vscale_f16", + "vscaleq_f16", + "vscale_f32", + "vscaleq_f32", + "vscaleq_f64", + "vset_lane_f64", + "vsetq_lane_f64", + "vsha512h2q_u64", + "vsha512hq_u64", + "vsha512su0q_u64", + "vsha512su1q_u64", + "vshld_s64", + "vshld_u64", + "vshll_high_n_s8", + "vshll_high_n_s16", + "vshll_high_n_s32", + "vshll_high_n_u8", + "vshll_high_n_u16", + "vshll_high_n_u32", + "vshrn_high_n_s16", + "vshrn_high_n_s32", + "vshrn_high_n_s64", + "vshrn_high_n_u16", + "vshrn_high_n_u32", + "vshrn_high_n_u64", + "vslid_n_s64", + "vslid_n_u64", + "vsm3partw1q_u32", + "vsm3partw2q_u32", + "vsm3ss1q_u32", + "vsm3tt1aq_u32", + "vsm3tt1bq_u32", + "vsm3tt2aq_u32", + "vsm3tt2bq_u32", + "vsm4ekeyq_u32", + "vsm4eq_u32", + "vsqadd_u8", + "vsqaddq_u8", + "vsqadd_u16", + "vsqaddq_u16", + "vsqadd_u32", + "vsqaddq_u32", + "vsqadd_u64", + "vsqaddq_u64", + "vsqaddb_u8", + "vsqaddh_u16", + "vsqaddd_u64", + "vsqadds_u32", + "vsqrt_f16", + "vsqrtq_f16", + "vsqrt_f32", + "vsqrtq_f32", + "vsqrt_f64", + "vsqrtq_f64", + "vsqrth_f16", + "vsrid_n_s64", + "vsrid_n_u64", + "vst1_f16", + "vst1q_f16", + "vst1_f64_x2", + "vst1q_f64_x2", + "vst1_f64_x3", + "vst1q_f64_x3", + "vst1_f64_x4", + "vst1q_f64_x4", + "vst1_lane_f64", + "vst1q_lane_f64", + "vst2_f64", + "vst2_lane_f64", + "vst2_lane_s64", + "vst2_lane_p64", + "vst2_lane_u64", + "vst2q_f64", + "vst2q_s64", + "vst2q_lane_f64", + "vst2q_lane_s8", + "vst2q_lane_s64", + "vst2q_lane_p64", + "vst2q_lane_u8", + "vst2q_lane_u64", + "vst2q_lane_p8", + "vst2q_p64", + "vst2q_u64", + "vst3_f64", + "vst3_lane_f64", + "vst3_lane_s64", + "vst3_lane_p64", + "vst3_lane_u64", + "vst3q_f64", + "vst3q_s64", + "vst3q_lane_f64", + "vst3q_lane_s8", + "vst3q_lane_s64", + "vst3q_lane_p64", + "vst3q_lane_u8", + "vst3q_lane_u64", + "vst3q_lane_p8", + "vst3q_p64", + "vst3q_u64", + "vst4_f64", + "vst4_lane_f64", + "vst4_lane_s64", + "vst4_lane_p64", + "vst4_lane_u64", + "vst4q_f64", + "vst4q_s64", + "vst4q_lane_f64", + "vst4q_lane_s8", + "vst4q_lane_s64", + "vst4q_lane_p64", + "vst4q_lane_u8", + "vst4q_lane_u64", + "vst4q_lane_p8", + "vst4q_p64", + "vst4q_u64", + "vstl1_lane_f64", + "vstl1q_lane_f64", + "vstl1_lane_u64", + "vstl1q_lane_u64", + "vstl1_lane_p64", + "vstl1q_lane_p64", + "vstl1_lane_s64", + "vstl1q_lane_s64", + "vsub_f64", + "vsubq_f64", + "vsubd_s64", + "vsubd_u64", + "vsubh_f16", + "vsubl_high_s8", + "vsubl_high_s16", + "vsubl_high_s32", + "vsubl_high_u8", + "vsubl_high_u16", + "vsubl_high_u32", + "vsubw_high_s8", + "vsubw_high_s16", + "vsubw_high_s32", + "vsubw_high_u8", + "vsubw_high_u16", + "vsubw_high_u32", + "vtrn1_f16", + "vtrn1q_f16", + "vtrn1_f32", + "vtrn1q_f64", + "vtrn1_s32", + "vtrn1q_s64", + "vtrn1_u32", + "vtrn1q_u64", + "vtrn1q_p64", + "vtrn1q_f32", + "vtrn1_s8", + "vtrn1q_s8", + "vtrn1_s16", + "vtrn1q_s16", + "vtrn1q_s32", + "vtrn1_u8", + "vtrn1q_u8", + "vtrn1_u16", + "vtrn1q_u16", + "vtrn1q_u32", + "vtrn1_p8", + "vtrn1q_p8", + "vtrn1_p16", + "vtrn1q_p16", + "vtrn2_f16", + "vtrn2q_f16", + "vtrn2_f32", + "vtrn2q_f64", + "vtrn2_s32", + "vtrn2q_s64", + "vtrn2_u32", + "vtrn2q_u64", + "vtrn2q_p64", + "vtrn2q_f32", + "vtrn2_s8", + "vtrn2q_s8", + "vtrn2_s16", + "vtrn2q_s16", + "vtrn2q_s32", + "vtrn2_u8", + "vtrn2q_u8", + "vtrn2_u16", + "vtrn2q_u16", + "vtrn2q_u32", + "vtrn2_p8", + "vtrn2q_p8", + "vtrn2_p16", + "vtrn2q_p16", + "vtst_s64", + "vtstq_s64", + "vtst_p64", + "vtstq_p64", + "vtst_u64", + "vtstq_u64", + "vtstd_s64", + "vtstd_u64", + "vuqadd_s8", + "vuqaddq_s8", + "vuqadd_s16", + "vuqaddq_s16", + "vuqadd_s32", + "vuqaddq_s32", + "vuqadd_s64", + "vuqaddq_s64", + "vuqaddb_s8", + "vuqaddh_s16", + "vuqaddd_s64", + "vuqadds_s32", + "vuzp1_f16", + "vuzp1q_f16", + "vuzp1_f32", + "vuzp1q_f64", + "vuzp1_s32", + "vuzp1q_s64", + "vuzp1_u32", + "vuzp1q_u64", + "vuzp1q_p64", + "vuzp1q_f32", + "vuzp1_s8", + "vuzp1q_s8", + "vuzp1_s16", + "vuzp1q_s16", + "vuzp1q_s32", + "vuzp1_u8", + "vuzp1q_u8", + "vuzp1_u16", + "vuzp1q_u16", + "vuzp1q_u32", + "vuzp1_p8", + "vuzp1q_p8", + "vuzp1_p16", + "vuzp1q_p16", + "vuzp2_f16", + "vuzp2q_f16", + "vuzp2_f32", + "vuzp2q_f64", + "vuzp2_s32", + "vuzp2q_s64", + "vuzp2_u32", + "vuzp2q_u64", + "vuzp2q_p64", + "vuzp2q_f32", + "vuzp2_s8", + "vuzp2q_s8", + "vuzp2_s16", + "vuzp2q_s16", + "vuzp2q_s32", + "vuzp2_u8", + "vuzp2q_u8", + "vuzp2_u16", + "vuzp2q_u16", + "vuzp2q_u32", + "vuzp2_p8", + "vuzp2q_p8", + "vuzp2_p16", + "vuzp2q_p16", + "vxarq_u64", + "vzip1_f16", + "vzip1q_f16", + "vzip1_f32", + "vzip1q_f32", + "vzip1q_f64", + "vzip1_s8", + "vzip1q_s8", + "vzip1_s16", + "vzip1q_s16", + "vzip1_s32", + "vzip1q_s32", + "vzip1q_s64", + "vzip1_u8", + "vzip1q_u8", + "vzip1_u16", + "vzip1q_u16", + "vzip1_u32", + "vzip1q_u32", + "vzip1q_u64", + "vzip1_p8", + "vzip1q_p8", + "vzip1_p16", + "vzip1q_p16", + "vzip1q_p64", + "vzip2_f16", + "vzip2q_f16", + "vzip2_f32", + "vzip2q_f32", + "vzip2q_f64", + "vzip2_s8", + "vzip2q_s8", + "vzip2_s16", + "vzip2q_s16", + "vzip2_s32", + "vzip2q_s32", + "vzip2q_s64", + "vzip2_u8", + "vzip2q_u8", + "vzip2_u16", + "vzip2q_u16", + "vzip2_u32", + "vzip2q_u32", + "vzip2q_u64", + "vzip2_p8", + "vzip2q_p8", + "vzip2_p16", + "vzip2q_p16", + "vzip2q_p64", + "__crc32b", + "__crc32cb", + "__crc32cd", + "__crc32ch", + "__crc32cw", + "__crc32d", + "__crc32h", + "__crc32w", + "vabal_s8", + "vabal_s16", + "vabal_s32", + "vabal_u8", + "vabal_u16", + "vabal_u32", + "vabd_f16", + "vabdq_f16", + "vabd_f32", + "vabdq_f32", + "vabd_s8", + "vabdq_s8", + "vabd_s16", + "vabdq_s16", + "vabd_s32", + "vabdq_s32", + "vabd_u8", + "vabdq_u8", + "vabd_u16", + "vabdq_u16", + "vabd_u32", + "vabdq_u32", + "vabdl_s8", + "vabdl_s16", + "vabdl_s32", + "vabdl_u8", + "vabdl_u16", + "vabdl_u32", + "vabs_f16", + "vabsq_f16", + "vabs_f32", + "vabsq_f32", + "vabs_s8", + "vabsq_s8", + "vabs_s16", + "vabsq_s16", + "vabs_s32", + "vabsq_s32", + "vabsh_f16", + "vadd_f16", + "vaddq_f16", + "vadd_p8", + "vaddq_p8", + "vadd_p16", + "vaddq_p16", + "vadd_p64", + "vaddq_p64", + "vaddh_f16", + "vaddhn_high_s16", + "vaddhn_high_s32", + "vaddhn_high_s64", + "vaddhn_high_u16", + "vaddhn_high_u32", + "vaddhn_high_u64", + "vaddhn_s16", + "vaddhn_s32", + "vaddhn_s64", + "vaddhn_u16", + "vaddhn_u32", + "vaddhn_u64", + "vaddq_p128", + "vaesdq_u8", + "vaeseq_u8", + "vaesimcq_u8", + "vaesmcq_u8", + "vbsl_f16", + "vbslq_f16", + "vcage_f16", + "vcageq_f16", + "vcage_f32", + "vcageq_f32", + "vcagt_f16", + "vcagtq_f16", + "vcagt_f32", + "vcagtq_f32", + "vcale_f16", + "vcaleq_f16", + "vcale_f32", + "vcaleq_f32", + "vcalt_f16", + "vcaltq_f16", + "vcalt_f32", + "vcaltq_f32", + "vceq_f16", + "vceqq_f16", + "vceq_p8", + "vceqq_p8", + "vcge_f16", + "vcgeq_f16", + "vcgez_f16", + "vcgezq_f16", + "vcgt_f16", + "vcgtq_f16", + "vcgtz_f16", + "vcgtzq_f16", + "vcle_f16", + "vcleq_f16", + "vclez_f16", + "vclezq_f16", + "vcls_s8", + "vclsq_s8", + "vcls_s16", + "vclsq_s16", + "vcls_s32", + "vclsq_s32", + "vcls_u8", + "vclsq_u8", + "vcls_u16", + "vclsq_u16", + "vcls_u32", + "vclsq_u32", + "vclt_f16", + "vcltq_f16", + "vcltz_f16", + "vcltzq_f16", + "vclz_s8", + "vclzq_s8", + "vclz_s16", + "vclzq_s16", + "vclz_s32", + "vclzq_s32", + "vclz_u16", + "vclz_u16", + "vclzq_u16", + "vclzq_u16", + "vclz_u32", + "vclz_u32", + "vclzq_u32", + "vclzq_u32", + "vclz_u8", + "vclz_u8", + "vclzq_u8", + "vclzq_u8", + "vcnt_s8", + "vcntq_s8", + "vcnt_u8", + "vcnt_u8", + "vcntq_u8", + "vcntq_u8", + "vcnt_p8", + "vcnt_p8", + "vcntq_p8", + "vcntq_p8", + "vcombine_f16", + "vcreate_f16", + "vcreate_f16", + "vcreate_f32", + "vcreate_f32", + "vcreate_s8", + "vcreate_s8", + "vcreate_s16", + "vcreate_s16", + "vcreate_s32", + "vcreate_s32", + "vcreate_s64", + "vcreate_u8", + "vcreate_u8", + "vcreate_u16", + "vcreate_u16", + "vcreate_u32", + "vcreate_u32", + "vcreate_u64", + "vcreate_p8", + "vcreate_p8", + "vcreate_p16", + "vcreate_p16", + "vcreate_p64", + "vcvt_f16_f32", + "vcvt_f16_s16", + "vcvtq_f16_s16", + "vcvt_f16_u16", + "vcvtq_f16_u16", + "vcvt_f32_f16", + "vcvt_f32_s32", + "vcvtq_f32_s32", + "vcvt_f32_u32", + "vcvtq_f32_u32", + "vcvt_n_f16_s16", + "vcvtq_n_f16_s16", + "vcvt_n_f16_u16", + "vcvtq_n_f16_u16", + "vcvt_n_f32_s32", + "vcvtq_n_f32_s32", + "vcvt_n_f32_s32", + "vcvtq_n_f32_s32", + "vcvt_n_f32_u32", + "vcvtq_n_f32_u32", + "vcvt_n_f32_u32", + "vcvtq_n_f32_u32", + "vcvt_n_s16_f16", + "vcvtq_n_s16_f16", + "vcvt_n_s32_f32", + "vcvtq_n_s32_f32", + "vcvt_n_s32_f32", + "vcvtq_n_s32_f32", + "vcvt_n_u16_f16", + "vcvtq_n_u16_f16", + "vcvt_n_u32_f32", + "vcvtq_n_u32_f32", + "vcvt_n_u32_f32", + "vcvtq_n_u32_f32", + "vcvt_s16_f16", + "vcvtq_s16_f16", + "vcvt_s32_f32", + "vcvtq_s32_f32", + "vcvt_u16_f16", + "vcvtq_u16_f16", + "vcvt_u32_f32", + "vcvtq_u32_f32", + "vdot_lane_s32", + "vdot_lane_s32", + "vdotq_lane_s32", + "vdotq_lane_s32", + "vdot_lane_u32", + "vdot_lane_u32", + "vdotq_lane_u32", + "vdotq_lane_u32", + "vdot_laneq_s32", + "vdot_laneq_s32", + "vdotq_laneq_s32", + "vdotq_laneq_s32", + "vdot_laneq_u32", + "vdot_laneq_u32", + "vdotq_laneq_u32", + "vdotq_laneq_u32", + "vdot_s32", + "vdotq_s32", + "vdot_u32", + "vdotq_u32", + "vdup_lane_f16", + "vdupq_lane_f16", + "vdup_lane_f32", + "vdup_lane_s32", + "vdup_lane_u32", + "vdupq_lane_f32", + "vdupq_lane_s32", + "vdupq_lane_u32", + "vdup_lane_p16", + "vdup_lane_s16", + "vdup_lane_u16", + "vdupq_lane_p16", + "vdupq_lane_s16", + "vdupq_lane_u16", + "vdup_lane_p8", + "vdup_lane_s8", + "vdup_lane_u8", + "vdupq_lane_p8", + "vdupq_lane_s8", + "vdupq_lane_u8", + "vdup_lane_s64", + "vdup_lane_u64", + "vdup_laneq_f16", + "vdupq_laneq_f16", + "vdup_laneq_f32", + "vdup_laneq_s32", + "vdup_laneq_u32", + "vdupq_laneq_f32", + "vdupq_laneq_s32", + "vdupq_laneq_u32", + "vdup_laneq_p16", + "vdup_laneq_s16", + "vdup_laneq_u16", + "vdupq_laneq_p16", + "vdupq_laneq_s16", + "vdupq_laneq_u16", + "vdup_laneq_p8", + "vdup_laneq_s8", + "vdup_laneq_u8", + "vdupq_laneq_p8", + "vdupq_laneq_s8", + "vdupq_laneq_u8", + "vdup_laneq_s64", + "vdup_laneq_u64", + "vdup_n_f16", + "vdupq_n_f16", + "vdupq_lane_s64", + "vdupq_lane_u64", + "vdupq_laneq_s64", + "vdupq_laneq_u64", + "vext_f16", + "vext_f32", + "vext_s32", + "vext_u32", + "vext_s8", + "vextq_s16", + "vext_u8", + "vextq_u16", + "vext_p8", + "vextq_p16", + "vextq_f16", + "vextq_f32", + "vext_s16", + "vextq_s32", + "vext_u16", + "vextq_u32", + "vext_p16", + "vextq_s64", + "vextq_u64", + "vextq_s8", + "vextq_u8", + "vextq_p8", + "vfma_f16", + "vfmaq_f16", + "vfma_f32", + "vfmaq_f32", + "vfma_n_f32", + "vfmaq_n_f32", + "vfms_f16", + "vfmsq_f16", + "vfms_f32", + "vfmsq_f32", + "vfms_n_f32", + "vfmsq_n_f32", + "vget_high_f16", + "vget_low_f16", + "vget_lane_f16", + "vgetq_lane_f16", + "vld1_dup_f16", + "vld1q_dup_f16", + "vld1_f16", + "vld1_f16", + "vld1q_f16", + "vld1q_f16", + "vld1_f16_x2", + "vld1_f16_x3", + "vld1_f16_x4", + "vld1q_f16_x2", + "vld1q_f16_x3", + "vld1q_f16_x4", + "vld1_f32_x2", + "vld1_f32_x3", + "vld1_f32_x4", + "vld1q_f32_x2", + "vld1q_f32_x3", + "vld1q_f32_x4", + "vld1_lane_f16", + "vld1q_lane_f16", + "vld1_p64_x2", + "vld1_p64_x3", + "vld1_p64_x4", + "vld1q_p64_x2", + "vld1q_p64_x3", + "vld1q_p64_x4", + "vld1_s8_x2", + "vld1_s8_x3", + "vld1_s8_x4", + "vld1q_s8_x2", + "vld1q_s8_x3", + "vld1q_s8_x4", + "vld1_s16_x2", + "vld1_s16_x3", + "vld1_s16_x4", + "vld1q_s16_x2", + "vld1q_s16_x3", + "vld1q_s16_x4", + "vld1_s32_x2", + "vld1_s32_x3", + "vld1_s32_x4", + "vld1q_s32_x2", + "vld1q_s32_x3", + "vld1q_s32_x4", + "vld1_s64_x2", + "vld1_s64_x3", + "vld1_s64_x4", + "vld1q_s64_x2", + "vld1q_s64_x3", + "vld1q_s64_x4", + "vld1_u8_x2", + "vld1_u8_x3", + "vld1_u8_x4", + "vld1q_u8_x2", + "vld1q_u8_x3", + "vld1q_u8_x4", + "vld1_u16_x2", + "vld1_u16_x3", + "vld1_u16_x4", + "vld1q_u16_x2", + "vld1q_u16_x3", + "vld1q_u16_x4", + "vld1_u32_x2", + "vld1_u32_x3", + "vld1_u32_x4", + "vld1q_u32_x2", + "vld1q_u32_x3", + "vld1q_u32_x4", + "vld1_u64_x2", + "vld1_u64_x3", + "vld1_u64_x4", + "vld1q_u64_x2", + "vld1q_u64_x3", + "vld1q_u64_x4", + "vld1_p8_x2", + "vld1_p8_x3", + "vld1_p8_x4", + "vld1q_p8_x2", + "vld1q_p8_x3", + "vld1q_p8_x4", + "vld1_p16_x2", + "vld1_p16_x3", + "vld1_p16_x4", + "vld1q_p16_x2", + "vld1q_p16_x3", + "vld1q_p16_x4", + "vld2_dup_f16", + "vld2q_dup_f16", + "vld2_dup_f16", + "vld2q_dup_f16", + "vld2_dup_f32", + "vld2q_dup_f32", + "vld2_dup_s8", + "vld2q_dup_s8", + "vld2_dup_s16", + "vld2q_dup_s16", + "vld2_dup_s32", + "vld2q_dup_s32", + "vld2_dup_f32", + "vld2q_dup_f32", + "vld2_dup_s8", + "vld2q_dup_s8", + "vld2_dup_s16", + "vld2q_dup_s16", + "vld2_dup_s32", + "vld2q_dup_s32", + "vld2_dup_p64", + "vld2_dup_s64", + "vld2_dup_s64", + "vld2_dup_u64", + "vld2_dup_u8", + "vld2_dup_u8", + "vld2q_dup_u8", + "vld2q_dup_u8", + "vld2_dup_u16", + "vld2_dup_u16", + "vld2q_dup_u16", + "vld2q_dup_u16", + "vld2_dup_u32", + "vld2_dup_u32", + "vld2q_dup_u32", + "vld2q_dup_u32", + "vld2_dup_p8", + "vld2_dup_p8", + "vld2q_dup_p8", + "vld2q_dup_p8", + "vld2_dup_p16", + "vld2_dup_p16", + "vld2q_dup_p16", + "vld2q_dup_p16", + "vld2_f16", + "vld2q_f16", + "vld2_f16", + "vld2q_f16", + "vld2_f32", + "vld2q_f32", + "vld2_s8", + "vld2q_s8", + "vld2_s16", + "vld2q_s16", + "vld2_s32", + "vld2q_s32", + "vld2_f32", + "vld2q_f32", + "vld2_s8", + "vld2q_s8", + "vld2_s16", + "vld2q_s16", + "vld2_s32", + "vld2q_s32", + "vld2_lane_f16", + "vld2q_lane_f16", + "vld2_lane_f16", + "vld2q_lane_f16", + "vld2_lane_f32", + "vld2q_lane_f32", + "vld2_lane_s8", + "vld2_lane_s16", + "vld2q_lane_s16", + "vld2_lane_s32", + "vld2q_lane_s32", + "vld2_lane_f32", + "vld2q_lane_f32", + "vld2q_lane_s16", + "vld2q_lane_s32", + "vld2_lane_s8", + "vld2_lane_s16", + "vld2_lane_s32", + "vld2_lane_u8", + "vld2_lane_u16", + "vld2q_lane_u16", + "vld2_lane_u32", + "vld2q_lane_u32", + "vld2_lane_p8", + "vld2_lane_p16", + "vld2q_lane_p16", + "vld2_p64", + "vld2_s64", + "vld2_s64", + "vld2_u64", + "vld2_u8", + "vld2q_u8", + "vld2_u16", + "vld2q_u16", + "vld2_u32", + "vld2q_u32", + "vld2_p8", + "vld2q_p8", + "vld2_p16", + "vld2q_p16", + "vld3_dup_f16", + "vld3q_dup_f16", + "vld3_dup_f16", + "vld3q_dup_f16", + "vld3_dup_f32", + "vld3q_dup_f32", + "vld3_dup_s8", + "vld3q_dup_s8", + "vld3_dup_s16", + "vld3q_dup_s16", + "vld3_dup_s32", + "vld3q_dup_s32", + "vld3_dup_s64", + "vld3_dup_f32", + "vld3q_dup_f32", + "vld3_dup_s8", + "vld3q_dup_s8", + "vld3_dup_s16", + "vld3q_dup_s16", + "vld3_dup_s32", + "vld3q_dup_s32", + "vld3_dup_p64", + "vld3_dup_s64", + "vld3_dup_u64", + "vld3_dup_u8", + "vld3_dup_u8", + "vld3q_dup_u8", + "vld3q_dup_u8", + "vld3_dup_u16", + "vld3_dup_u16", + "vld3q_dup_u16", + "vld3q_dup_u16", + "vld3_dup_u32", + "vld3_dup_u32", + "vld3q_dup_u32", + "vld3q_dup_u32", + "vld3_dup_p8", + "vld3_dup_p8", + "vld3q_dup_p8", + "vld3q_dup_p8", + "vld3_dup_p16", + "vld3_dup_p16", + "vld3q_dup_p16", + "vld3q_dup_p16", + "vld3_f16", + "vld3q_f16", + "vld3_f16", + "vld3q_f16", + "vld3_f32", + "vld3q_f32", + "vld3_s8", + "vld3q_s8", + "vld3_s16", + "vld3q_s16", + "vld3_s32", + "vld3q_s32", + "vld3_f32", + "vld3q_f32", + "vld3_s8", + "vld3q_s8", + "vld3_s16", + "vld3q_s16", + "vld3_s32", + "vld3q_s32", + "vld3_lane_f16", + "vld3q_lane_f16", + "vld3_lane_f16", + "vld3q_lane_f16", + "vld3_lane_f32", + "vld3q_lane_f32", + "vld3_lane_f32", + "vld3_lane_s8", + "vld3_lane_s16", + "vld3q_lane_s16", + "vld3_lane_s32", + "vld3q_lane_s32", + "vld3_lane_s8", + "vld3_lane_s16", + "vld3q_lane_s16", + "vld3_lane_s32", + "vld3q_lane_s32", + "vld3_lane_u8", + "vld3_lane_u16", + "vld3q_lane_u16", + "vld3_lane_u32", + "vld3q_lane_u32", + "vld3_lane_p8", + "vld3_lane_p16", + "vld3q_lane_p16", + "vld3_p64", + "vld3_s64", + "vld3_s64", + "vld3_u64", + "vld3_u8", + "vld3q_u8", + "vld3_u16", + "vld3q_u16", + "vld3_u32", + "vld3q_u32", + "vld3_p8", + "vld3q_p8", + "vld3_p16", + "vld3q_p16", + "vld3q_lane_f32", + "vld4_dup_f16", + "vld4q_dup_f16", + "vld4_dup_f16", + "vld4q_dup_f16", + "vld4_dup_f32", + "vld4q_dup_f32", + "vld4_dup_s8", + "vld4q_dup_s8", + "vld4_dup_s16", + "vld4q_dup_s16", + "vld4_dup_s32", + "vld4q_dup_s32", + "vld4_dup_f32", + "vld4q_dup_f32", + "vld4_dup_s8", + "vld4q_dup_s8", + "vld4_dup_s16", + "vld4q_dup_s16", + "vld4_dup_s32", + "vld4q_dup_s32", + "vld4_dup_s64", + "vld4_dup_p64", + "vld4_dup_s64", + "vld4_dup_u64", + "vld4_dup_u8", + "vld4_dup_u8", + "vld4q_dup_u8", + "vld4q_dup_u8", + "vld4_dup_u16", + "vld4_dup_u16", + "vld4q_dup_u16", + "vld4q_dup_u16", + "vld4_dup_u32", + "vld4_dup_u32", + "vld4q_dup_u32", + "vld4q_dup_u32", + "vld4_dup_p8", + "vld4_dup_p8", + "vld4q_dup_p8", + "vld4q_dup_p8", + "vld4_dup_p16", + "vld4_dup_p16", + "vld4q_dup_p16", + "vld4q_dup_p16", + "vld4_f16", + "vld4q_f16", + "vld4_f16", + "vld4q_f16", + "vld4_f32", + "vld4q_f32", + "vld4_s8", + "vld4q_s8", + "vld4_s16", + "vld4q_s16", + "vld4_s32", + "vld4q_s32", + "vld4_f32", + "vld4q_f32", + "vld4_s8", + "vld4q_s8", + "vld4_s16", + "vld4q_s16", + "vld4_s32", + "vld4q_s32", + "vld4_lane_f16", + "vld4q_lane_f16", + "vld4_lane_f16", + "vld4q_lane_f16", + "vld4_lane_f32", + "vld4q_lane_f32", + "vld4_lane_s8", + "vld4_lane_s16", + "vld4q_lane_s16", + "vld4_lane_s32", + "vld4q_lane_s32", + "vld4_lane_f32", + "vld4q_lane_f32", + "vld4_lane_s8", + "vld4_lane_s16", + "vld4q_lane_s16", + "vld4_lane_s32", + "vld4q_lane_s32", + "vld4_lane_u8", + "vld4_lane_u16", + "vld4q_lane_u16", + "vld4_lane_u32", + "vld4q_lane_u32", + "vld4_lane_p8", + "vld4_lane_p16", + "vld4q_lane_p16", + "vld4_p64", + "vld4_s64", + "vld4_s64", + "vld4_u64", + "vld4_u8", + "vld4q_u8", + "vld4_u16", + "vld4q_u16", + "vld4_u32", + "vld4q_u32", + "vld4_p8", + "vld4q_p8", + "vld4_p16", + "vld4q_p16", + "vmax_f16", + "vmaxq_f16", + "vmax_f32", + "vmaxq_f32", + "vmax_s8", + "vmaxq_s8", + "vmax_s16", + "vmaxq_s16", + "vmax_s32", + "vmaxq_s32", + "vmax_u8", + "vmaxq_u8", + "vmax_u16", + "vmaxq_u16", + "vmax_u32", + "vmaxq_u32", + "vmaxnm_f16", + "vmaxnmq_f16", + "vmaxnm_f32", + "vmaxnmq_f32", + "vmin_f16", + "vminq_f16", + "vmin_f32", + "vminq_f32", + "vmin_s8", + "vminq_s8", + "vmin_s16", + "vminq_s16", + "vmin_s32", + "vminq_s32", + "vmin_u8", + "vminq_u8", + "vmin_u16", + "vminq_u16", + "vmin_u32", + "vminq_u32", + "vminnm_f16", + "vminnmq_f16", + "vminnm_f32", + "vminnmq_f32", + "vmla_f32", + "vmlaq_f32", + "vmla_lane_f32", + "vmla_laneq_f32", + "vmlaq_lane_f32", + "vmlaq_laneq_f32", + "vmla_lane_s16", + "vmla_lane_u16", + "vmla_laneq_s16", + "vmla_laneq_u16", + "vmlaq_lane_s16", + "vmlaq_lane_u16", + "vmlaq_laneq_s16", + "vmlaq_laneq_u16", + "vmla_lane_s32", + "vmla_lane_u32", + "vmla_laneq_s32", + "vmla_laneq_u32", + "vmlaq_lane_s32", + "vmlaq_lane_u32", + "vmlaq_laneq_s32", + "vmlaq_laneq_u32", + "vmla_n_f32", + "vmlaq_n_f32", + "vmla_n_s16", + "vmlaq_n_s16", + "vmla_n_u16", + "vmlaq_n_u16", + "vmla_n_s32", + "vmlaq_n_s32", + "vmla_n_u32", + "vmlaq_n_u32", + "vmla_s8", + "vmlaq_s8", + "vmla_s16", + "vmlaq_s16", + "vmla_s32", + "vmlaq_s32", + "vmla_u8", + "vmlaq_u8", + "vmla_u16", + "vmlaq_u16", + "vmla_u32", + "vmlaq_u32", + "vmlal_lane_s16", + "vmlal_laneq_s16", + "vmlal_lane_s32", + "vmlal_laneq_s32", + "vmlal_lane_u16", + "vmlal_laneq_u16", + "vmlal_lane_u32", + "vmlal_laneq_u32", + "vmlal_n_s16", + "vmlal_n_s32", + "vmlal_n_u16", + "vmlal_n_u32", + "vmlal_s8", + "vmlal_s16", + "vmlal_s32", + "vmlal_u8", + "vmlal_u16", + "vmlal_u32", + "vmls_f32", + "vmlsq_f32", + "vmls_lane_f32", + "vmls_laneq_f32", + "vmlsq_lane_f32", + "vmlsq_laneq_f32", + "vmls_lane_s16", + "vmls_lane_u16", + "vmls_laneq_s16", + "vmls_laneq_u16", + "vmlsq_lane_s16", + "vmlsq_lane_u16", + "vmlsq_laneq_s16", + "vmlsq_laneq_u16", + "vmls_lane_s32", + "vmls_lane_u32", + "vmls_laneq_s32", + "vmls_laneq_u32", + "vmlsq_lane_s32", + "vmlsq_lane_u32", + "vmlsq_laneq_s32", + "vmlsq_laneq_u32", + "vmls_n_f32", + "vmlsq_n_f32", + "vmls_n_s16", + "vmlsq_n_s16", + "vmls_n_u16", + "vmlsq_n_u16", + "vmls_n_s32", + "vmlsq_n_s32", + "vmls_n_u32", + "vmlsq_n_u32", + "vmls_s8", + "vmlsq_s8", + "vmls_s16", + "vmlsq_s16", + "vmls_s32", + "vmlsq_s32", + "vmls_u8", + "vmlsq_u8", + "vmls_u16", + "vmlsq_u16", + "vmls_u32", + "vmlsq_u32", + "vmlsl_lane_s16", + "vmlsl_laneq_s16", + "vmlsl_lane_s32", + "vmlsl_laneq_s32", + "vmlsl_lane_u16", + "vmlsl_laneq_u16", + "vmlsl_lane_u32", + "vmlsl_laneq_u32", + "vmlsl_n_s16", + "vmlsl_n_s32", + "vmlsl_n_u16", + "vmlsl_n_u32", + "vmlsl_s8", + "vmlsl_s16", + "vmlsl_s32", + "vmlsl_u8", + "vmlsl_u16", + "vmlsl_u32", + "vmmlaq_s32", + "vmmlaq_u32", + "vmov_n_f16", + "vmovq_n_f16", + "vmul_f16", + "vmulq_f16", + "vmul_lane_f16", + "vmulq_lane_f16", + "vmul_lane_f32", + "vmul_laneq_f32", + "vmulq_lane_f32", + "vmulq_laneq_f32", + "vmul_lane_s16", + "vmulq_lane_s16", + "vmul_lane_s32", + "vmulq_lane_s32", + "vmul_lane_u16", + "vmulq_lane_u16", + "vmul_lane_u32", + "vmulq_lane_u32", + "vmul_laneq_s16", + "vmulq_laneq_s16", + "vmul_laneq_s32", + "vmulq_laneq_s32", + "vmul_laneq_u16", + "vmulq_laneq_u16", + "vmul_laneq_u32", + "vmulq_laneq_u32", + "vmul_n_f16", + "vmulq_n_f16", + "vmul_n_f32", + "vmulq_n_f32", + "vmul_n_s16", + "vmulq_n_s16", + "vmul_n_s32", + "vmulq_n_s32", + "vmul_n_u16", + "vmulq_n_u16", + "vmul_n_u32", + "vmulq_n_u32", + "vmul_p8", + "vmulq_p8", + "vmull_lane_s16", + "vmull_laneq_s16", + "vmull_lane_s32", + "vmull_laneq_s32", + "vmull_lane_u16", + "vmull_laneq_u16", + "vmull_lane_u32", + "vmull_laneq_u32", + "vmull_n_s16", + "vmull_n_s32", + "vmull_n_u16", + "vmull_n_u32", + "vmull_p8", + "vmull_s16", + "vmull_s32", + "vmull_s8", + "vmull_u8", + "vmull_u16", + "vmull_u32", + "vneg_f16", + "vnegq_f16", + "vneg_f32", + "vnegq_f32", + "vneg_s8", + "vnegq_s8", + "vneg_s16", + "vnegq_s16", + "vneg_s32", + "vnegq_s32", + "vpadal_s8", + "vpadalq_s8", + "vpadal_s16", + "vpadalq_s16", + "vpadal_s32", + "vpadalq_s32", + "vpadal_u8", + "vpadalq_u8", + "vpadal_u16", + "vpadalq_u16", + "vpadal_u32", + "vpadalq_u32", + "vpadd_f16", + "vpadd_f32", + "vpadd_s8", + "vpadd_s16", + "vpadd_s32", + "vpadd_u8", + "vpadd_u8", + "vpadd_u16", + "vpadd_u16", + "vpadd_u32", + "vpadd_u32", + "vpaddl_s8", + "vpaddlq_s8", + "vpaddl_s16", + "vpaddlq_s16", + "vpaddl_s32", + "vpaddlq_s32", + "vpaddl_u8", + "vpaddlq_u8", + "vpaddl_u16", + "vpaddlq_u16", + "vpaddl_u32", + "vpaddlq_u32", + "vpmax_f32", + "vpmax_s8", + "vpmax_s16", + "vpmax_s32", + "vpmax_u8", + "vpmax_u16", + "vpmax_u32", + "vpmin_f32", + "vpmin_s8", + "vpmin_s16", + "vpmin_s32", + "vpmin_u8", + "vpmin_u16", + "vpmin_u32", + "vqabs_s8", + "vqabsq_s8", + "vqabs_s16", + "vqabsq_s16", + "vqabs_s32", + "vqabsq_s32", + "vqadd_s64", + "vqaddq_s64", + "vqadd_u64", + "vqaddq_u64", + "vqdmlal_lane_s16", + "vqdmlal_lane_s32", + "vqdmlal_n_s16", + "vqdmlal_n_s32", + "vqdmlal_s16", + "vqdmlal_s32", + "vqdmlsl_lane_s16", + "vqdmlsl_lane_s32", + "vqdmlsl_n_s16", + "vqdmlsl_n_s32", + "vqdmlsl_s16", + "vqdmlsl_s32", + "vqdmulh_laneq_s16", + "vqdmulhq_laneq_s16", + "vqdmulh_laneq_s32", + "vqdmulhq_laneq_s32", + "vqdmulh_n_s16", + "vqdmulhq_n_s16", + "vqdmulh_n_s32", + "vqdmulhq_n_s32", + "vqdmulh_s16", + "vqdmulhq_s16", + "vqdmulh_s32", + "vqdmulhq_s32", + "vqdmull_lane_s16", + "vqdmull_lane_s32", + "vqdmull_n_s16", + "vqdmull_n_s32", + "vqdmull_s16", + "vqdmull_s32", + "vqmovn_s16", + "vqmovn_s32", + "vqmovn_s64", + "vqmovn_u16", + "vqmovn_u32", + "vqmovn_u64", + "vqmovun_s16", + "vqmovun_s32", + "vqmovun_s64", + "vqneg_s8", + "vqnegq_s8", + "vqneg_s16", + "vqnegq_s16", + "vqneg_s32", + "vqnegq_s32", + "vqrdmulh_lane_s16", + "vqrdmulh_lane_s32", + "vqrdmulh_laneq_s16", + "vqrdmulh_laneq_s32", + "vqrdmulhq_lane_s16", + "vqrdmulhq_lane_s32", + "vqrdmulhq_laneq_s16", + "vqrdmulhq_laneq_s32", + "vqrdmulh_n_s16", + "vqrdmulhq_n_s16", + "vqrdmulh_n_s32", + "vqrdmulhq_n_s32", + "vqrdmulh_s16", + "vqrdmulhq_s16", + "vqrdmulh_s32", + "vqrdmulhq_s32", + "vqrshl_s8", + "vqrshlq_s8", + "vqrshl_s16", + "vqrshlq_s16", + "vqrshl_s32", + "vqrshlq_s32", + "vqrshl_s64", + "vqrshlq_s64", + "vqrshl_u8", + "vqrshlq_u8", + "vqrshl_u16", + "vqrshlq_u16", + "vqrshl_u32", + "vqrshlq_u32", + "vqrshl_u64", + "vqrshlq_u64", + "vqrshrn_n_s16", + "vqrshrn_n_s32", + "vqrshrn_n_s64", + "vqrshrn_n_s16", + "vqrshrn_n_s32", + "vqrshrn_n_s64", + "vqrshrn_n_u16", + "vqrshrn_n_u32", + "vqrshrn_n_u64", + "vqrshrn_n_u16", + "vqrshrn_n_u32", + "vqrshrn_n_u64", + "vqrshrun_n_s16", + "vqrshrun_n_s32", + "vqrshrun_n_s64", + "vqrshrun_n_s16", + "vqrshrun_n_s32", + "vqrshrun_n_s64", + "vqshl_n_s8", + "vqshlq_n_s8", + "vqshl_n_s16", + "vqshlq_n_s16", + "vqshl_n_s32", + "vqshlq_n_s32", + "vqshl_n_s64", + "vqshlq_n_s64", + "vqshl_n_u8", + "vqshlq_n_u8", + "vqshl_n_u16", + "vqshlq_n_u16", + "vqshl_n_u32", + "vqshlq_n_u32", + "vqshl_n_u64", + "vqshlq_n_u64", + "vqshl_s8", + "vqshlq_s8", + "vqshl_s16", + "vqshlq_s16", + "vqshl_s32", + "vqshlq_s32", + "vqshl_s64", + "vqshlq_s64", + "vqshl_u8", + "vqshlq_u8", + "vqshl_u16", + "vqshlq_u16", + "vqshl_u32", + "vqshlq_u32", + "vqshl_u64", + "vqshlq_u64", + "vqshlu_n_s8", + "vqshluq_n_s8", + "vqshlu_n_s16", + "vqshluq_n_s16", + "vqshlu_n_s32", + "vqshluq_n_s32", + "vqshlu_n_s64", + "vqshluq_n_s64", + "vqshlu_n_s8", + "vqshluq_n_s8", + "vqshlu_n_s16", + "vqshluq_n_s16", + "vqshlu_n_s32", + "vqshluq_n_s32", + "vqshlu_n_s64", + "vqshluq_n_s64", + "vqshrn_n_s16", + "vqshrn_n_s32", + "vqshrn_n_s64", + "vqshrn_n_s16", + "vqshrn_n_s32", + "vqshrn_n_s64", + "vqshrn_n_u16", + "vqshrn_n_u32", + "vqshrn_n_u64", + "vqshrn_n_u16", + "vqshrn_n_u32", + "vqshrn_n_u64", + "vqshrun_n_s16", + "vqshrun_n_s32", + "vqshrun_n_s64", + "vqshrun_n_s16", + "vqshrun_n_s32", + "vqshrun_n_s64", + "vqsub_s64", + "vqsubq_s64", + "vqsub_u64", + "vqsubq_u64", + "vraddhn_high_s16", + "vraddhn_high_s32", + "vraddhn_high_s64", + "vraddhn_high_u16", + "vraddhn_high_u32", + "vraddhn_high_u64", + "vraddhn_s16", + "vraddhn_s32", + "vraddhn_s64", + "vraddhn_u16", + "vraddhn_u16", + "vraddhn_u32", + "vraddhn_u32", + "vraddhn_u64", + "vraddhn_u64", + "vrecpe_f16", + "vrecpeq_f16", + "vrecpe_f32", + "vrecpeq_f32", + "vrecpe_u32", + "vrecpeq_u32", + "vrecps_f16", + "vrecpsq_f16", + "vrecps_f32", + "vrecpsq_f32", + "vreinterpret_f32_f16", + "vreinterpret_f32_f16", + "vreinterpret_s8_f16", + "vreinterpret_s8_f16", + "vreinterpret_s16_f16", + "vreinterpret_s16_f16", + "vreinterpret_s32_f16", + "vreinterpret_s32_f16", + "vreinterpret_s64_f16", + "vreinterpret_s64_f16", + "vreinterpret_u8_f16", + "vreinterpret_u8_f16", + "vreinterpret_u16_f16", + "vreinterpret_u16_f16", + "vreinterpret_u32_f16", + "vreinterpret_u32_f16", + "vreinterpret_u64_f16", + "vreinterpret_u64_f16", + "vreinterpret_p8_f16", + "vreinterpret_p8_f16", + "vreinterpret_p16_f16", + "vreinterpret_p16_f16", + "vreinterpretq_f32_f16", + "vreinterpretq_f32_f16", + "vreinterpretq_s8_f16", + "vreinterpretq_s8_f16", + "vreinterpretq_s16_f16", + "vreinterpretq_s16_f16", + "vreinterpretq_s32_f16", + "vreinterpretq_s32_f16", + "vreinterpretq_s64_f16", + "vreinterpretq_s64_f16", + "vreinterpretq_u8_f16", + "vreinterpretq_u8_f16", + "vreinterpretq_u16_f16", + "vreinterpretq_u16_f16", + "vreinterpretq_u32_f16", + "vreinterpretq_u32_f16", + "vreinterpretq_u64_f16", + "vreinterpretq_u64_f16", + "vreinterpretq_p8_f16", + "vreinterpretq_p8_f16", + "vreinterpretq_p16_f16", + "vreinterpretq_p16_f16", + "vreinterpret_f16_f32", + "vreinterpret_f16_f32", + "vreinterpretq_f16_f32", + "vreinterpretq_f16_f32", + "vreinterpret_f16_s8", + "vreinterpret_f16_s8", + "vreinterpretq_f16_s8", + "vreinterpretq_f16_s8", + "vreinterpret_f16_s16", + "vreinterpret_f16_s16", + "vreinterpretq_f16_s16", + "vreinterpretq_f16_s16", + "vreinterpret_f16_s32", + "vreinterpret_f16_s32", + "vreinterpretq_f16_s32", + "vreinterpretq_f16_s32", + "vreinterpret_f16_s64", + "vreinterpret_f16_s64", + "vreinterpretq_f16_s64", + "vreinterpretq_f16_s64", + "vreinterpret_f16_u8", + "vreinterpret_f16_u8", + "vreinterpretq_f16_u8", + "vreinterpretq_f16_u8", + "vreinterpret_f16_u16", + "vreinterpret_f16_u16", + "vreinterpretq_f16_u16", + "vreinterpretq_f16_u16", + "vreinterpret_f16_u32", + "vreinterpret_f16_u32", + "vreinterpretq_f16_u32", + "vreinterpretq_f16_u32", + "vreinterpret_f16_u64", + "vreinterpret_f16_u64", + "vreinterpretq_f16_u64", + "vreinterpretq_f16_u64", + "vreinterpret_f16_p8", + "vreinterpret_f16_p8", + "vreinterpretq_f16_p8", + "vreinterpretq_f16_p8", + "vreinterpret_f16_p16", + "vreinterpret_f16_p16", + "vreinterpretq_f16_p16", + "vreinterpretq_f16_p16", + "vreinterpretq_f16_p128", + "vreinterpretq_f16_p128", + "vreinterpret_p64_f16", + "vreinterpret_p64_f16", + "vreinterpretq_p128_f16", + "vreinterpretq_p128_f16", + "vreinterpretq_p64_f16", + "vreinterpretq_p64_f16", + "vreinterpret_f16_p64", + "vreinterpret_f16_p64", + "vreinterpretq_f16_p64", + "vreinterpretq_f16_p64", + "vreinterpretq_f32_p128", + "vreinterpretq_f32_p128", + "vreinterpret_s8_f32", + "vreinterpret_s8_f32", + "vreinterpret_s16_f32", + "vreinterpret_s16_f32", + "vreinterpret_s32_f32", + "vreinterpret_s32_f32", + "vreinterpret_s64_f32", + "vreinterpret_s64_f32", + "vreinterpret_u8_f32", + "vreinterpret_u8_f32", + "vreinterpret_u16_f32", + "vreinterpret_u16_f32", + "vreinterpret_u32_f32", + "vreinterpret_u32_f32", + "vreinterpret_u64_f32", + "vreinterpret_u64_f32", + "vreinterpret_p8_f32", + "vreinterpret_p8_f32", + "vreinterpret_p16_f32", + "vreinterpret_p16_f32", + "vreinterpretq_p128_f32", + "vreinterpretq_p128_f32", + "vreinterpretq_s8_f32", + "vreinterpretq_s8_f32", + "vreinterpretq_s16_f32", + "vreinterpretq_s16_f32", + "vreinterpretq_s32_f32", + "vreinterpretq_s32_f32", + "vreinterpretq_s64_f32", + "vreinterpretq_s64_f32", + "vreinterpretq_u8_f32", + "vreinterpretq_u8_f32", + "vreinterpretq_u16_f32", + "vreinterpretq_u16_f32", + "vreinterpretq_u32_f32", + "vreinterpretq_u32_f32", + "vreinterpretq_u64_f32", + "vreinterpretq_u64_f32", + "vreinterpretq_p8_f32", + "vreinterpretq_p8_f32", + "vreinterpretq_p16_f32", + "vreinterpretq_p16_f32", + "vreinterpret_f32_s8", + "vreinterpret_f32_s8", + "vreinterpret_s16_s8", + "vreinterpret_s16_s8", + "vreinterpret_s32_s8", + "vreinterpret_s32_s8", + "vreinterpret_s64_s8", + "vreinterpret_s64_s8", + "vreinterpret_u8_s8", + "vreinterpret_u8_s8", + "vreinterpret_u16_s8", + "vreinterpret_u16_s8", + "vreinterpret_u32_s8", + "vreinterpret_u32_s8", + "vreinterpret_u64_s8", + "vreinterpret_u64_s8", + "vreinterpret_p8_s8", + "vreinterpret_p8_s8", + "vreinterpret_p16_s8", + "vreinterpret_p16_s8", + "vreinterpretq_f32_s8", + "vreinterpretq_f32_s8", + "vreinterpretq_s16_s8", + "vreinterpretq_s16_s8", + "vreinterpretq_s32_s8", + "vreinterpretq_s32_s8", + "vreinterpretq_s64_s8", + "vreinterpretq_s64_s8", + "vreinterpretq_u8_s8", + "vreinterpretq_u8_s8", + "vreinterpretq_u16_s8", + "vreinterpretq_u16_s8", + "vreinterpretq_u32_s8", + "vreinterpretq_u32_s8", + "vreinterpretq_u64_s8", + "vreinterpretq_u64_s8", + "vreinterpretq_p8_s8", + "vreinterpretq_p8_s8", + "vreinterpretq_p16_s8", + "vreinterpretq_p16_s8", + "vreinterpret_f32_s16", + "vreinterpret_f32_s16", + "vreinterpret_s8_s16", + "vreinterpret_s8_s16", + "vreinterpret_s32_s16", + "vreinterpret_s32_s16", + "vreinterpret_s64_s16", + "vreinterpret_s64_s16", + "vreinterpret_u8_s16", + "vreinterpret_u8_s16", + "vreinterpret_u16_s16", + "vreinterpret_u16_s16", + "vreinterpret_u32_s16", + "vreinterpret_u32_s16", + "vreinterpret_u64_s16", + "vreinterpret_u64_s16", + "vreinterpret_p8_s16", + "vreinterpret_p8_s16", + "vreinterpret_p16_s16", + "vreinterpret_p16_s16", + "vreinterpretq_f32_s16", + "vreinterpretq_f32_s16", + "vreinterpretq_s8_s16", + "vreinterpretq_s8_s16", + "vreinterpretq_s32_s16", + "vreinterpretq_s32_s16", + "vreinterpretq_s64_s16", + "vreinterpretq_s64_s16", + "vreinterpretq_u8_s16", + "vreinterpretq_u8_s16", + "vreinterpretq_u16_s16", + "vreinterpretq_u16_s16", + "vreinterpretq_u32_s16", + "vreinterpretq_u32_s16", + "vreinterpretq_u64_s16", + "vreinterpretq_u64_s16", + "vreinterpretq_p8_s16", + "vreinterpretq_p8_s16", + "vreinterpretq_p16_s16", + "vreinterpretq_p16_s16", + "vreinterpret_f32_s32", + "vreinterpret_f32_s32", + "vreinterpret_s8_s32", + "vreinterpret_s8_s32", + "vreinterpret_s16_s32", + "vreinterpret_s16_s32", + "vreinterpret_s64_s32", + "vreinterpret_s64_s32", + "vreinterpret_u8_s32", + "vreinterpret_u8_s32", + "vreinterpret_u16_s32", + "vreinterpret_u16_s32", + "vreinterpret_u32_s32", + "vreinterpret_u32_s32", + "vreinterpret_u64_s32", + "vreinterpret_u64_s32", + "vreinterpret_p8_s32", + "vreinterpret_p8_s32", + "vreinterpret_p16_s32", + "vreinterpret_p16_s32", + "vreinterpretq_f32_s32", + "vreinterpretq_f32_s32", + "vreinterpretq_s8_s32", + "vreinterpretq_s8_s32", + "vreinterpretq_s16_s32", + "vreinterpretq_s16_s32", + "vreinterpretq_s64_s32", + "vreinterpretq_s64_s32", + "vreinterpretq_u8_s32", + "vreinterpretq_u8_s32", + "vreinterpretq_u16_s32", + "vreinterpretq_u16_s32", + "vreinterpretq_u32_s32", + "vreinterpretq_u32_s32", + "vreinterpretq_u64_s32", + "vreinterpretq_u64_s32", + "vreinterpretq_p8_s32", + "vreinterpretq_p8_s32", + "vreinterpretq_p16_s32", + "vreinterpretq_p16_s32", + "vreinterpret_f32_s64", + "vreinterpret_f32_s64", + "vreinterpret_s8_s64", + "vreinterpret_s8_s64", + "vreinterpret_s16_s64", + "vreinterpret_s16_s64", + "vreinterpret_s32_s64", + "vreinterpret_s32_s64", + "vreinterpret_u8_s64", + "vreinterpret_u8_s64", + "vreinterpret_u16_s64", + "vreinterpret_u16_s64", + "vreinterpret_u32_s64", + "vreinterpret_u32_s64", + "vreinterpret_u64_s64", + "vreinterpret_p8_s64", + "vreinterpret_p8_s64", + "vreinterpret_p16_s64", + "vreinterpret_p16_s64", + "vreinterpretq_f32_s64", + "vreinterpretq_f32_s64", + "vreinterpretq_s8_s64", + "vreinterpretq_s8_s64", + "vreinterpretq_s16_s64", + "vreinterpretq_s16_s64", + "vreinterpretq_s32_s64", + "vreinterpretq_s32_s64", + "vreinterpretq_u8_s64", + "vreinterpretq_u8_s64", + "vreinterpretq_u16_s64", + "vreinterpretq_u16_s64", + "vreinterpretq_u32_s64", + "vreinterpretq_u32_s64", + "vreinterpretq_u64_s64", + "vreinterpretq_u64_s64", + "vreinterpretq_p8_s64", + "vreinterpretq_p8_s64", + "vreinterpretq_p16_s64", + "vreinterpretq_p16_s64", + "vreinterpret_f32_u8", + "vreinterpret_f32_u8", + "vreinterpret_s8_u8", + "vreinterpret_s8_u8", + "vreinterpret_s16_u8", + "vreinterpret_s16_u8", + "vreinterpret_s32_u8", + "vreinterpret_s32_u8", + "vreinterpret_s64_u8", + "vreinterpret_s64_u8", + "vreinterpret_u16_u8", + "vreinterpret_u16_u8", + "vreinterpret_u32_u8", + "vreinterpret_u32_u8", + "vreinterpret_u64_u8", + "vreinterpret_u64_u8", + "vreinterpret_p8_u8", + "vreinterpret_p8_u8", + "vreinterpret_p16_u8", + "vreinterpret_p16_u8", + "vreinterpretq_f32_u8", + "vreinterpretq_f32_u8", + "vreinterpretq_s8_u8", + "vreinterpretq_s8_u8", + "vreinterpretq_s16_u8", + "vreinterpretq_s16_u8", + "vreinterpretq_s32_u8", + "vreinterpretq_s32_u8", + "vreinterpretq_s64_u8", + "vreinterpretq_s64_u8", + "vreinterpretq_u16_u8", + "vreinterpretq_u16_u8", + "vreinterpretq_u32_u8", + "vreinterpretq_u32_u8", + "vreinterpretq_u64_u8", + "vreinterpretq_u64_u8", + "vreinterpretq_p8_u8", + "vreinterpretq_p8_u8", + "vreinterpretq_p16_u8", + "vreinterpretq_p16_u8", + "vreinterpret_f32_u16", + "vreinterpret_f32_u16", + "vreinterpret_s8_u16", + "vreinterpret_s8_u16", + "vreinterpret_s16_u16", + "vreinterpret_s16_u16", + "vreinterpret_s32_u16", + "vreinterpret_s32_u16", + "vreinterpret_s64_u16", + "vreinterpret_s64_u16", + "vreinterpret_u8_u16", + "vreinterpret_u8_u16", + "vreinterpret_u32_u16", + "vreinterpret_u32_u16", + "vreinterpret_u64_u16", + "vreinterpret_u64_u16", + "vreinterpret_p8_u16", + "vreinterpret_p8_u16", + "vreinterpret_p16_u16", + "vreinterpret_p16_u16", + "vreinterpretq_f32_u16", + "vreinterpretq_f32_u16", + "vreinterpretq_s8_u16", + "vreinterpretq_s8_u16", + "vreinterpretq_s16_u16", + "vreinterpretq_s16_u16", + "vreinterpretq_s32_u16", + "vreinterpretq_s32_u16", + "vreinterpretq_s64_u16", + "vreinterpretq_s64_u16", + "vreinterpretq_u8_u16", + "vreinterpretq_u8_u16", + "vreinterpretq_u32_u16", + "vreinterpretq_u32_u16", + "vreinterpretq_u64_u16", + "vreinterpretq_u64_u16", + "vreinterpretq_p8_u16", + "vreinterpretq_p8_u16", + "vreinterpretq_p16_u16", + "vreinterpretq_p16_u16", + "vreinterpret_f32_u32", + "vreinterpret_f32_u32", + "vreinterpret_s8_u32", + "vreinterpret_s8_u32", + "vreinterpret_s16_u32", + "vreinterpret_s16_u32", + "vreinterpret_s32_u32", + "vreinterpret_s32_u32", + "vreinterpret_s64_u32", + "vreinterpret_s64_u32", + "vreinterpret_u8_u32", + "vreinterpret_u8_u32", + "vreinterpret_u16_u32", + "vreinterpret_u16_u32", + "vreinterpret_u64_u32", + "vreinterpret_u64_u32", + "vreinterpret_p8_u32", + "vreinterpret_p8_u32", + "vreinterpret_p16_u32", + "vreinterpret_p16_u32", + "vreinterpretq_f32_u32", + "vreinterpretq_f32_u32", + "vreinterpretq_s8_u32", + "vreinterpretq_s8_u32", + "vreinterpretq_s16_u32", + "vreinterpretq_s16_u32", + "vreinterpretq_s32_u32", + "vreinterpretq_s32_u32", + "vreinterpretq_s64_u32", + "vreinterpretq_s64_u32", + "vreinterpretq_u8_u32", + "vreinterpretq_u8_u32", + "vreinterpretq_u16_u32", + "vreinterpretq_u16_u32", + "vreinterpretq_u64_u32", + "vreinterpretq_u64_u32", + "vreinterpretq_p8_u32", + "vreinterpretq_p8_u32", + "vreinterpretq_p16_u32", + "vreinterpretq_p16_u32", + "vreinterpret_f32_u64", + "vreinterpret_f32_u64", + "vreinterpret_s8_u64", + "vreinterpret_s8_u64", + "vreinterpret_s16_u64", + "vreinterpret_s16_u64", + "vreinterpret_s32_u64", + "vreinterpret_s32_u64", + "vreinterpret_s64_u64", + "vreinterpret_u8_u64", + "vreinterpret_u8_u64", + "vreinterpret_u16_u64", + "vreinterpret_u16_u64", + "vreinterpret_u32_u64", + "vreinterpret_u32_u64", + "vreinterpret_p8_u64", + "vreinterpret_p8_u64", + "vreinterpret_p16_u64", + "vreinterpret_p16_u64", + "vreinterpretq_f32_u64", + "vreinterpretq_f32_u64", + "vreinterpretq_s8_u64", + "vreinterpretq_s8_u64", + "vreinterpretq_s16_u64", + "vreinterpretq_s16_u64", + "vreinterpretq_s32_u64", + "vreinterpretq_s32_u64", + "vreinterpretq_s64_u64", + "vreinterpretq_s64_u64", + "vreinterpretq_u8_u64", + "vreinterpretq_u8_u64", + "vreinterpretq_u16_u64", + "vreinterpretq_u16_u64", + "vreinterpretq_u32_u64", + "vreinterpretq_u32_u64", + "vreinterpretq_p8_u64", + "vreinterpretq_p8_u64", + "vreinterpretq_p16_u64", + "vreinterpretq_p16_u64", + "vreinterpret_f32_p8", + "vreinterpret_f32_p8", + "vreinterpret_s8_p8", + "vreinterpret_s8_p8", + "vreinterpret_s16_p8", + "vreinterpret_s16_p8", + "vreinterpret_s32_p8", + "vreinterpret_s32_p8", + "vreinterpret_s64_p8", + "vreinterpret_s64_p8", + "vreinterpret_u8_p8", + "vreinterpret_u8_p8", + "vreinterpret_u16_p8", + "vreinterpret_u16_p8", + "vreinterpret_u32_p8", + "vreinterpret_u32_p8", + "vreinterpret_u64_p8", + "vreinterpret_u64_p8", + "vreinterpret_p16_p8", + "vreinterpret_p16_p8", + "vreinterpretq_f32_p8", + "vreinterpretq_f32_p8", + "vreinterpretq_s8_p8", + "vreinterpretq_s8_p8", + "vreinterpretq_s16_p8", + "vreinterpretq_s16_p8", + "vreinterpretq_s32_p8", + "vreinterpretq_s32_p8", + "vreinterpretq_s64_p8", + "vreinterpretq_s64_p8", + "vreinterpretq_u8_p8", + "vreinterpretq_u8_p8", + "vreinterpretq_u16_p8", + "vreinterpretq_u16_p8", + "vreinterpretq_u32_p8", + "vreinterpretq_u32_p8", + "vreinterpretq_u64_p8", + "vreinterpretq_u64_p8", + "vreinterpretq_p16_p8", + "vreinterpretq_p16_p8", + "vreinterpret_f32_p16", + "vreinterpret_f32_p16", + "vreinterpret_s8_p16", + "vreinterpret_s8_p16", + "vreinterpret_s16_p16", + "vreinterpret_s16_p16", + "vreinterpret_s32_p16", + "vreinterpret_s32_p16", + "vreinterpret_s64_p16", + "vreinterpret_s64_p16", + "vreinterpret_u8_p16", + "vreinterpret_u8_p16", + "vreinterpret_u16_p16", + "vreinterpret_u16_p16", + "vreinterpret_u32_p16", + "vreinterpret_u32_p16", + "vreinterpret_u64_p16", + "vreinterpret_u64_p16", + "vreinterpret_p8_p16", + "vreinterpret_p8_p16", + "vreinterpretq_f32_p16", + "vreinterpretq_f32_p16", + "vreinterpretq_s8_p16", + "vreinterpretq_s8_p16", + "vreinterpretq_s16_p16", + "vreinterpretq_s16_p16", + "vreinterpretq_s32_p16", + "vreinterpretq_s32_p16", + "vreinterpretq_s64_p16", + "vreinterpretq_s64_p16", + "vreinterpretq_u8_p16", + "vreinterpretq_u8_p16", + "vreinterpretq_u16_p16", + "vreinterpretq_u16_p16", + "vreinterpretq_u32_p16", + "vreinterpretq_u32_p16", + "vreinterpretq_u64_p16", + "vreinterpretq_u64_p16", + "vreinterpretq_p8_p16", + "vreinterpretq_p8_p16", + "vreinterpretq_s8_p128", + "vreinterpretq_s8_p128", + "vreinterpretq_s16_p128", + "vreinterpretq_s16_p128", + "vreinterpretq_s32_p128", + "vreinterpretq_s32_p128", + "vreinterpretq_s64_p128", + "vreinterpretq_s64_p128", + "vreinterpretq_u8_p128", + "vreinterpretq_u8_p128", + "vreinterpretq_u16_p128", + "vreinterpretq_u16_p128", + "vreinterpretq_u32_p128", + "vreinterpretq_u32_p128", + "vreinterpretq_u64_p128", + "vreinterpretq_u64_p128", + "vreinterpretq_p8_p128", + "vreinterpretq_p8_p128", + "vreinterpretq_p16_p128", + "vreinterpretq_p16_p128", + "vreinterpretq_p64_p128", + "vreinterpretq_p64_p128", + "vreinterpret_p64_s8", + "vreinterpret_p64_s8", + "vreinterpretq_p128_s8", + "vreinterpretq_p128_s8", + "vreinterpretq_p64_s8", + "vreinterpretq_p64_s8", + "vreinterpret_p64_s16", + "vreinterpret_p64_s16", + "vreinterpretq_p128_s16", + "vreinterpretq_p128_s16", + "vreinterpretq_p64_s16", + "vreinterpretq_p64_s16", + "vreinterpret_p64_s32", + "vreinterpret_p64_s32", + "vreinterpretq_p128_s32", + "vreinterpretq_p128_s32", + "vreinterpretq_p64_s32", + "vreinterpretq_p64_s32", + "vreinterpretq_p128_s64", + "vreinterpretq_p128_s64", + "vreinterpret_p64_u8", + "vreinterpret_p64_u8", + "vreinterpretq_p128_u8", + "vreinterpretq_p128_u8", + "vreinterpretq_p64_u8", + "vreinterpretq_p64_u8", + "vreinterpret_p64_u16", + "vreinterpret_p64_u16", + "vreinterpretq_p128_u16", + "vreinterpretq_p128_u16", + "vreinterpretq_p64_u16", + "vreinterpretq_p64_u16", + "vreinterpret_p64_u32", + "vreinterpret_p64_u32", + "vreinterpretq_p128_u32", + "vreinterpretq_p128_u32", + "vreinterpretq_p64_u32", + "vreinterpretq_p64_u32", + "vreinterpretq_p128_u64", + "vreinterpretq_p128_u64", + "vreinterpret_p64_p8", + "vreinterpret_p64_p8", + "vreinterpretq_p128_p8", + "vreinterpretq_p128_p8", + "vreinterpretq_p64_p8", + "vreinterpretq_p64_p8", + "vreinterpret_p64_p16", + "vreinterpret_p64_p16", + "vreinterpretq_p128_p16", + "vreinterpretq_p128_p16", + "vreinterpretq_p64_p16", + "vreinterpretq_p64_p16", + "vreinterpret_s8_p64", + "vreinterpret_s8_p64", + "vreinterpret_s16_p64", + "vreinterpret_s16_p64", + "vreinterpret_s32_p64", + "vreinterpret_s32_p64", + "vreinterpret_u8_p64", + "vreinterpret_u8_p64", + "vreinterpret_u16_p64", + "vreinterpret_u16_p64", + "vreinterpret_u32_p64", + "vreinterpret_u32_p64", + "vreinterpret_p8_p64", + "vreinterpret_p8_p64", + "vreinterpret_p16_p64", + "vreinterpret_p16_p64", + "vreinterpretq_p128_p64", + "vreinterpretq_p128_p64", + "vreinterpretq_s8_p64", + "vreinterpretq_s8_p64", + "vreinterpretq_s16_p64", + "vreinterpretq_s16_p64", + "vreinterpretq_s32_p64", + "vreinterpretq_s32_p64", + "vreinterpretq_u8_p64", + "vreinterpretq_u8_p64", + "vreinterpretq_u16_p64", + "vreinterpretq_u16_p64", + "vreinterpretq_u32_p64", + "vreinterpretq_u32_p64", + "vreinterpretq_p8_p64", + "vreinterpretq_p8_p64", + "vreinterpretq_p16_p64", + "vreinterpretq_p16_p64", + "vrev64_f16", + "vrev64q_f16", + "vrndn_f16", + "vrndnq_f16", + "vrndn_f32", + "vrndnq_f32", + "vrshl_s8", + "vrshlq_s8", + "vrshl_s16", + "vrshlq_s16", + "vrshl_s32", + "vrshlq_s32", + "vrshl_s64", + "vrshlq_s64", + "vrshl_u8", + "vrshlq_u8", + "vrshl_u16", + "vrshlq_u16", + "vrshl_u32", + "vrshlq_u32", + "vrshl_u64", + "vrshlq_u64", + "vrshr_n_s8", + "vrshrq_n_s8", + "vrshr_n_s16", + "vrshrq_n_s16", + "vrshr_n_s32", + "vrshrq_n_s32", + "vrshr_n_s64", + "vrshrq_n_s64", + "vrshr_n_u8", + "vrshrq_n_u8", + "vrshr_n_u16", + "vrshrq_n_u16", + "vrshr_n_u32", + "vrshrq_n_u32", + "vrshr_n_u64", + "vrshrq_n_u64", + "vrshrn_n_s16", + "vrshrn_n_s32", + "vrshrn_n_s64", + "vrshrn_n_s16", + "vrshrn_n_s32", + "vrshrn_n_s64", + "vrshrn_n_u16", + "vrshrn_n_u32", + "vrshrn_n_u64", + "vrsqrte_f16", + "vrsqrteq_f16", + "vrsqrteq_f32", + "vrsqrte_u32", + "vrsqrteq_u32", + "vrsqrts_f16", + "vrsqrtsq_f16", + "vrsqrts_f32", + "vrsqrtsq_f32", + "vrsra_n_s8", + "vrsraq_n_s8", + "vrsra_n_s16", + "vrsraq_n_s16", + "vrsra_n_s32", + "vrsraq_n_s32", + "vrsra_n_s64", + "vrsraq_n_s64", + "vrsra_n_u8", + "vrsraq_n_u8", + "vrsra_n_u16", + "vrsraq_n_u16", + "vrsra_n_u32", + "vrsraq_n_u32", + "vrsra_n_u64", + "vrsraq_n_u64", + "vrsubhn_s16", + "vrsubhn_s32", + "vrsubhn_s64", + "vrsubhn_u16", + "vrsubhn_u16", + "vrsubhn_u32", + "vrsubhn_u32", + "vrsubhn_u64", + "vrsubhn_u64", + "vset_lane_f16", + "vsetq_lane_f16", + "vset_lane_f32", + "vsetq_lane_f32", + "vset_lane_s8", + "vsetq_lane_s8", + "vset_lane_s16", + "vsetq_lane_s16", + "vset_lane_s32", + "vsetq_lane_s32", + "vsetq_lane_s64", + "vset_lane_u8", + "vsetq_lane_u8", + "vset_lane_u16", + "vsetq_lane_u16", + "vset_lane_u32", + "vsetq_lane_u32", + "vsetq_lane_u64", + "vset_lane_p8", + "vsetq_lane_p8", + "vset_lane_p16", + "vsetq_lane_p16", + "vset_lane_p64", + "vset_lane_s64", + "vset_lane_u64", + "vsetq_lane_p64", + "vsha1cq_u32", + "vsha1h_u32", + "vsha1mq_u32", + "vsha1pq_u32", + "vsha1su0q_u32", + "vsha1su1q_u32", + "vsha256h2q_u32", + "vsha256hq_u32", + "vsha256su0q_u32", + "vsha256su1q_u32", + "vshl_n_s8", + "vshlq_n_s8", + "vshl_n_s16", + "vshlq_n_s16", + "vshl_n_s32", + "vshlq_n_s32", + "vshl_n_s64", + "vshlq_n_s64", + "vshl_n_u8", + "vshlq_n_u8", + "vshl_n_u16", + "vshlq_n_u16", + "vshl_n_u32", + "vshlq_n_u32", + "vshl_n_u64", + "vshlq_n_u64", + "vshl_s8", + "vshlq_s8", + "vshl_s16", + "vshlq_s16", + "vshl_s32", + "vshlq_s32", + "vshl_s64", + "vshlq_s64", + "vshl_u8", + "vshlq_u8", + "vshl_u16", + "vshlq_u16", + "vshl_u32", + "vshlq_u32", + "vshl_u64", + "vshlq_u64", + "vshll_n_s16", + "vshll_n_s32", + "vshll_n_s8", + "vshll_n_u16", + "vshll_n_u32", + "vshll_n_u8", + "vshr_n_s8", + "vshrq_n_s8", + "vshr_n_s16", + "vshrq_n_s16", + "vshr_n_s32", + "vshrq_n_s32", + "vshr_n_s64", + "vshrq_n_s64", + "vshr_n_u8", + "vshrq_n_u8", + "vshr_n_u16", + "vshrq_n_u16", + "vshr_n_u32", + "vshrq_n_u32", + "vshr_n_u64", + "vshrq_n_u64", + "vshrn_n_s16", + "vshrn_n_s32", + "vshrn_n_s64", + "vshrn_n_u16", + "vshrn_n_u32", + "vshrn_n_u64", + "vsra_n_s8", + "vsraq_n_s8", + "vsra_n_s16", + "vsraq_n_s16", + "vsra_n_s32", + "vsraq_n_s32", + "vsra_n_s64", + "vsraq_n_s64", + "vsra_n_u8", + "vsraq_n_u8", + "vsra_n_u16", + "vsraq_n_u16", + "vsra_n_u32", + "vsraq_n_u32", + "vsra_n_u64", + "vsraq_n_u64", + "vst1_f16", + "vst1q_f16", + "vst1_f16_x2", + "vst1q_f16_x2", + "vst1_f16_x2", + "vst1q_f16_x2", + "vst1_f16_x3", + "vst1q_f16_x3", + "vst1_f16_x3", + "vst1q_f16_x3", + "vst1_f16_x4", + "vst1q_f16_x4", + "vst1_f16_x4", + "vst1q_f16_x4", + "vst1_f32_x2", + "vst1q_f32_x2", + "vst1_f32_x2", + "vst1q_f32_x2", + "vst1_f32_x3", + "vst1q_f32_x3", + "vst1_f32_x4", + "vst1q_f32_x4", + "vst1_f32_x4", + "vst1q_f32_x4", + "vst1_lane_f16", + "vst1q_lane_f16", + "vst1_lane_f32", + "vst1q_lane_f32", + "vst1_lane_s8", + "vst1q_lane_s8", + "vst1_lane_s16", + "vst1q_lane_s16", + "vst1_lane_s32", + "vst1q_lane_s32", + "vst1q_lane_s64", + "vst1_lane_u8", + "vst1q_lane_u8", + "vst1_lane_u16", + "vst1q_lane_u16", + "vst1_lane_u32", + "vst1q_lane_u32", + "vst1q_lane_u64", + "vst1_lane_p8", + "vst1q_lane_p8", + "vst1_lane_p16", + "vst1q_lane_p16", + "vst1_lane_p64", + "vst1_lane_s64", + "vst1_lane_u64", + "vst1_p64_x2", + "vst1_p64_x3", + "vst1_p64_x4", + "vst1q_p64_x2", + "vst1q_p64_x3", + "vst1q_p64_x4", + "vst1_s8_x2", + "vst1q_s8_x2", + "vst1_s16_x2", + "vst1q_s16_x2", + "vst1_s32_x2", + "vst1q_s32_x2", + "vst1_s64_x2", + "vst1q_s64_x2", + "vst1_s8_x2", + "vst1q_s8_x2", + "vst1_s16_x2", + "vst1q_s16_x2", + "vst1_s32_x2", + "vst1q_s32_x2", + "vst1_s64_x2", + "vst1q_s64_x2", + "vst1_s8_x3", + "vst1q_s8_x3", + "vst1_s16_x3", + "vst1q_s16_x3", + "vst1_s32_x3", + "vst1q_s32_x3", + "vst1_s64_x3", + "vst1q_s64_x3", + "vst1_s8_x3", + "vst1q_s8_x3", + "vst1_s16_x3", + "vst1q_s16_x3", + "vst1_s32_x3", + "vst1q_s32_x3", + "vst1_s64_x3", + "vst1q_s64_x3", + "vst1_s8_x4", + "vst1q_s8_x4", + "vst1_s16_x4", + "vst1q_s16_x4", + "vst1_s32_x4", + "vst1q_s32_x4", + "vst1_s64_x4", + "vst1q_s64_x4", + "vst1_s8_x4", + "vst1q_s8_x4", + "vst1_s16_x4", + "vst1q_s16_x4", + "vst1_s32_x4", + "vst1q_s32_x4", + "vst1_s64_x4", + "vst1q_s64_x4", + "vst1_u8_x2", + "vst1_u8_x3", + "vst1_u8_x4", + "vst1q_u8_x2", + "vst1q_u8_x3", + "vst1q_u8_x4", + "vst1_u16_x2", + "vst1_u16_x3", + "vst1_u16_x4", + "vst1q_u16_x2", + "vst1q_u16_x3", + "vst1q_u16_x4", + "vst1_u32_x2", + "vst1_u32_x3", + "vst1_u32_x4", + "vst1q_u32_x2", + "vst1q_u32_x3", + "vst1q_u32_x4", + "vst1_u64_x2", + "vst1_u64_x3", + "vst1_u64_x4", + "vst1q_u64_x2", + "vst1q_u64_x3", + "vst1q_u64_x4", + "vst1_p8_x2", + "vst1_p8_x3", + "vst1_p8_x4", + "vst1q_p8_x2", + "vst1q_p8_x3", + "vst1q_p8_x4", + "vst1_p16_x2", + "vst1_p16_x3", + "vst1_p16_x4", + "vst1q_p16_x2", + "vst1q_p16_x3", + "vst1q_p16_x4", + "vst1q_lane_p64", + "vst2_f16", + "vst2q_f16", + "vst2_f16", + "vst2q_f16", + "vst2_f32", + "vst2q_f32", + "vst2_s8", + "vst2q_s8", + "vst2_s16", + "vst2q_s16", + "vst2_s32", + "vst2q_s32", + "vst2_f32", + "vst2q_f32", + "vst2_s8", + "vst2q_s8", + "vst2_s16", + "vst2q_s16", + "vst2_s32", + "vst2q_s32", + "vst2_lane_f16", + "vst2q_lane_f16", + "vst2_lane_f16", + "vst2q_lane_f16", + "vst2_lane_f32", + "vst2q_lane_f32", + "vst2_lane_s8", + "vst2_lane_s16", + "vst2q_lane_s16", + "vst2_lane_s32", + "vst2q_lane_s32", + "vst2_lane_f32", + "vst2q_lane_f32", + "vst2_lane_s8", + "vst2_lane_s16", + "vst2q_lane_s16", + "vst2_lane_s32", + "vst2q_lane_s32", + "vst2_lane_u8", + "vst2_lane_u16", + "vst2q_lane_u16", + "vst2_lane_u32", + "vst2q_lane_u32", + "vst2_lane_p8", + "vst2_lane_p16", + "vst2q_lane_p16", + "vst2_p64", + "vst2_s64", + "vst2_s64", + "vst2_u64", + "vst2_u8", + "vst2q_u8", + "vst2_u16", + "vst2q_u16", + "vst2_u32", + "vst2q_u32", + "vst2_p8", + "vst2q_p8", + "vst2_p16", + "vst2q_p16", + "vst3_f16", + "vst3q_f16", + "vst3_f16", + "vst3q_f16", + "vst3_f32", + "vst3q_f32", + "vst3_s8", + "vst3q_s8", + "vst3_s16", + "vst3q_s16", + "vst3_s32", + "vst3q_s32", + "vst3_f32", + "vst3q_f32", + "vst3_s8", + "vst3q_s8", + "vst3_s16", + "vst3q_s16", + "vst3_s32", + "vst3q_s32", + "vst3_lane_f16", + "vst3q_lane_f16", + "vst3_lane_f16", + "vst3q_lane_f16", + "vst3_lane_f32", + "vst3q_lane_f32", + "vst3_lane_s8", + "vst3_lane_s16", + "vst3q_lane_s16", + "vst3_lane_s32", + "vst3q_lane_s32", + "vst3_lane_f32", + "vst3q_lane_f32", + "vst3_lane_s8", + "vst3_lane_s16", + "vst3q_lane_s16", + "vst3_lane_s32", + "vst3q_lane_s32", + "vst3_lane_u8", + "vst3_lane_u16", + "vst3q_lane_u16", + "vst3_lane_u32", + "vst3q_lane_u32", + "vst3_lane_p8", + "vst3_lane_p16", + "vst3q_lane_p16", + "vst3_p64", + "vst3_s64", + "vst3_s64", + "vst3_u64", + "vst3_u8", + "vst3q_u8", + "vst3_u16", + "vst3q_u16", + "vst3_u32", + "vst3q_u32", + "vst3_p8", + "vst3q_p8", + "vst3_p16", + "vst3q_p16", + "vst4_f16", + "vst4q_f16", + "vst4_f16", + "vst4q_f16", + "vst4_f32", + "vst4q_f32", + "vst4_s8", + "vst4q_s8", + "vst4_s16", + "vst4q_s16", + "vst4_s32", + "vst4q_s32", + "vst4_f32", + "vst4q_f32", + "vst4_s8", + "vst4q_s8", + "vst4_s16", + "vst4q_s16", + "vst4_s32", + "vst4q_s32", + "vst4_lane_f16", + "vst4q_lane_f16", + "vst4_lane_f16", + "vst4q_lane_f16", + "vst4_lane_f32", + "vst4q_lane_f32", + "vst4_lane_s8", + "vst4_lane_s16", + "vst4q_lane_s16", + "vst4_lane_s32", + "vst4q_lane_s32", + "vst4_lane_f32", + "vst4q_lane_f32", + "vst4_lane_s8", + "vst4_lane_s16", + "vst4q_lane_s16", + "vst4_lane_s32", + "vst4q_lane_s32", + "vst4_lane_u8", + "vst4_lane_u16", + "vst4q_lane_u16", + "vst4_lane_u32", + "vst4q_lane_u32", + "vst4_lane_p8", + "vst4_lane_p16", + "vst4q_lane_p16", + "vst4_p64", + "vst4_s64", + "vst4_s64", + "vst4_u64", + "vst4_u8", + "vst4q_u8", + "vst4_u16", + "vst4q_u16", + "vst4_u32", + "vst4q_u32", + "vst4_p8", + "vst4q_p8", + "vst4_p16", + "vst4q_p16", + "vsub_f16", + "vsubq_f16", + "vsub_s64", + "vsubq_s64", + "vsub_u64", + "vsubq_u64", + "vsubhn_high_s16", + "vsubhn_high_s32", + "vsubhn_high_s64", + "vsubhn_high_u16", + "vsubhn_high_u32", + "vsubhn_high_u64", + "vsubhn_s16", + "vsubhn_s32", + "vsubhn_s64", + "vsubhn_u16", + "vsubhn_u32", + "vsubhn_u64", + "vsubl_s8", + "vsubl_s16", + "vsubl_s32", + "vsubl_u8", + "vsubl_u16", + "vsubl_u32", + "vsubw_s8", + "vsubw_s16", + "vsubw_s32", + "vsubw_u8", + "vsubw_u16", + "vsubw_u32", + "vsudot_lane_s32", + "vsudot_lane_s32", + "vsudotq_lane_s32", + "vsudotq_lane_s32", + "vsudot_laneq_s32", + "vsudotq_laneq_s32", + "vtrn_f16", + "vtrnq_f16", + "vtrn_f32", + "vtrn_s32", + "vtrn_u32", + "vtrnq_f32", + "vtrn_s8", + "vtrnq_s8", + "vtrn_s16", + "vtrnq_s16", + "vtrnq_s32", + "vtrn_u8", + "vtrnq_u8", + "vtrn_u16", + "vtrnq_u16", + "vtrnq_u32", + "vtrn_p8", + "vtrnq_p8", + "vtrn_p16", + "vtrnq_p16", + "vtst_s8", + "vtstq_s8", + "vtst_s16", + "vtstq_s16", + "vtst_s32", + "vtstq_s32", + "vtst_p8", + "vtstq_p8", + "vtst_p16", + "vtstq_p16", + "vtst_u8", + "vtstq_u8", + "vtst_u16", + "vtstq_u16", + "vtst_u32", + "vtstq_u32", + "vusdot_lane_s32", + "vusdot_lane_s32", + "vusdotq_lane_s32", + "vusdotq_lane_s32", + "vusdot_laneq_s32", + "vusdot_laneq_s32", + "vusdotq_laneq_s32", + "vusdotq_laneq_s32", + "vusdot_s32", + "vusdotq_s32", + "vusmmlaq_s32", + "vuzp_f16", + "vuzpq_f16", + "vuzp_f32", + "vuzp_s32", + "vuzp_u32", + "vuzpq_f32", + "vuzp_s8", + "vuzpq_s8", + "vuzp_s16", + "vuzpq_s16", + "vuzpq_s32", + "vuzp_u8", + "vuzpq_u8", + "vuzp_u16", + "vuzpq_u16", + "vuzpq_u32", + "vuzp_p8", + "vuzpq_p8", + "vuzp_p16", + "vuzpq_p16", + "vzip_f16", + "vzipq_f16", + "vzip_f32", + "vzip_s32", + "vzip_u32", + "vzip_s8", + "vzip_s16", + "vzip_u8", + "vzip_u16", + "vzip_p8", + "vzip_p16", + "vzipq_f32", + "vzipq_s8", + "vzipq_s16", + "vzipq_s32", + "vzipq_u8", + "vzipq_u16", + "vzipq_u32", + "vzipq_p8", + "vzipq_p16", + "__rndr", + "__rndrrs", +]; From 2637e0806bc952dcdccaa1c8c6837737612fa1f9 Mon Sep 17 00:00:00 2001 From: David Wood Date: Thu, 15 Jan 2026 15:53:53 +0000 Subject: [PATCH 02/20] gen-arm: add `generate_load_store_tests` Instead of generating load/store tests based on the input filename - which no longer works given the expected input file structure of `stdarch-gen-arm` - add a simple global context option that SVE specs can set. --- library/stdarch/crates/stdarch-gen-arm/src/context.rs | 4 ++++ library/stdarch/crates/stdarch-gen-arm/src/main.rs | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/library/stdarch/crates/stdarch-gen-arm/src/context.rs b/library/stdarch/crates/stdarch-gen-arm/src/context.rs index 9b8eb8e8b9bf..4d02a82b8966 100644 --- a/library/stdarch/crates/stdarch-gen-arm/src/context.rs +++ b/library/stdarch/crates/stdarch-gen-arm/src/context.rs @@ -43,6 +43,10 @@ pub struct GlobalContext { /// Should all LLVM wrappers convert their arguments to a signed type #[serde(default)] pub auto_llvm_sign_conversion: bool, + + /// Should SVE load/store tests be generated? + #[serde(default)] + pub generate_load_store_tests: bool, } /// Context of an intrinsic group diff --git a/library/stdarch/crates/stdarch-gen-arm/src/main.rs b/library/stdarch/crates/stdarch-gen-arm/src/main.rs index e14e2782485b..b7e2aa416fb5 100644 --- a/library/stdarch/crates/stdarch-gen-arm/src/main.rs +++ b/library/stdarch/crates/stdarch-gen-arm/src/main.rs @@ -54,7 +54,7 @@ fn main() -> Result<(), String> { vv.into_iter().flatten().collect_vec() })?; - if filepath.ends_with("sve.spec.yml") || filepath.ends_with("sve2.spec.yml") { + if input.ctx.generate_load_store_tests { let loads = intrinsics.iter() .filter_map(|i| { if matches!(i.test, Test::Load(..)) { From 8077797d754474b09f69ac5b0b4b9616e4a41230 Mon Sep 17 00:00:00 2001 From: David Wood Date: Sat, 28 Feb 2026 18:00:16 +0000 Subject: [PATCH 03/20] gen-arm: remove `SvUndef` The `SvUndef` expression is no longer necessary as a `core::intrinsics::scalable::sve_undef` intrinsic has been introduced to produce an undefined SVE vector, used by `svundef*` intrinsics. Other intrinsics that used `SvUndef` now use the `svundef*` intrinsics. --- library/stdarch/crates/stdarch-gen-arm/README.md | 3 --- .../stdarch/crates/stdarch-gen-arm/src/expression.rs | 12 ++---------- 2 files changed, 2 insertions(+), 13 deletions(-) diff --git a/library/stdarch/crates/stdarch-gen-arm/README.md b/library/stdarch/crates/stdarch-gen-arm/README.md index 64f1183f1d6d..970721681c04 100644 --- a/library/stdarch/crates/stdarch-gen-arm/README.md +++ b/library/stdarch/crates/stdarch-gen-arm/README.md @@ -205,9 +205,6 @@ MatchKind: - `Array` - An array of expressions - Usage: `Array: [, ...]` -- `SvUndef` - - Returns the LLVM `undef` symbol - - Usage: `SvUndef` - `Multiply` - Simply `*` - Usage: `Multiply: [, ]` diff --git a/library/stdarch/crates/stdarch-gen-arm/src/expression.rs b/library/stdarch/crates/stdarch-gen-arm/src/expression.rs index bf48f0dab749..0b6ffef9d8d3 100644 --- a/library/stdarch/crates/stdarch-gen-arm/src/expression.rs +++ b/library/stdarch/crates/stdarch-gen-arm/src/expression.rs @@ -143,8 +143,6 @@ pub enum Expression { LLVMLink(LLVMLink), /// Casts the given expression to the specified (unchecked) type CastAs(Box, String), - /// Returns the LLVM `undef` symbol - SvUndef, /// Multiplication Multiply(Box, Box), /// Xor @@ -295,7 +293,7 @@ pub fn build(&mut self, intrinsic: &Intrinsic, ctx: &mut Context) -> context::Re /// - An unnecessary `unsafe` is a warning, made into an error by the CI's `-D warnings`. /// /// This **panics** if it encounters an expression that shouldn't appear in a safe function at - /// all (such as `SvUndef`). + /// all. pub fn requires_unsafe_wrapper(&self, ctx_fn: &str) -> bool { match self { // The call will need to be unsafe, but the declaration does not. @@ -347,9 +345,6 @@ pub fn requires_unsafe_wrapper(&self, ctx_fn: &str) -> bool { }, // We only use macros to check const generics (using static assertions). Self::MacroCall(_name, _args) => false, - // Materialising uninitialised values is always unsafe, and we avoid it in safe - // functions. - Self::SvUndef => panic!("Refusing to wrap unsafe SvUndef in safe function '{ctx_fn}'."), // Variants that aren't tokenised. We shouldn't encounter these here. Self::MatchKind(..) => { unimplemented!("The unsafety of {self:?} cannot be determined in '{ctx_fn}'.") @@ -390,9 +385,7 @@ fn from_str(s: &str) -> Result { static MACRO_RE: LazyLock = LazyLock::new(|| Regex::new(r"^(?P[\w\d_]+)!\((?P.*?)\);?$").unwrap()); - if s == "SvUndef" { - Ok(Expression::SvUndef) - } else if MACRO_RE.is_match(s) { + if MACRO_RE.is_match(s) { let c = MACRO_RE.captures(s).unwrap(); let ex = c["ex"].to_string(); let _: TokenStream = ex @@ -533,7 +526,6 @@ fn to_tokens(&self, tokens: &mut TokenStream) { let ty: TokenStream = ty.parse().expect("invalid syntax"); tokens.append_all(quote! { #ex as #ty }) } - Self::SvUndef => tokens.append_all(quote! { simd_reinterpret(()) }), Self::Multiply(lhs, rhs) => tokens.append_all(quote! { #lhs * #rhs }), Self::Xor(lhs, rhs) => tokens.append_all(quote! { #lhs ^ #rhs }), Self::Type(ty) => ty.to_tokens(tokens), From 55b65ff1ee6ed650d2f50fa30968f91d7cf418a6 Mon Sep 17 00:00:00 2001 From: David Wood Date: Thu, 15 Jan 2026 15:53:53 +0000 Subject: [PATCH 04/20] gen-arm: s/simd_reinterpret/transmute_unchecked `simd_reinterpret` was expected to be used when it was added as `transmute_unchecked` requires `Sized`, but scalable vectors are now `Sized` so `transmute_unchecked` can be used and `simd_reinterpret` was not added in rust-lang/rust#143924. --- library/stdarch/crates/stdarch-gen-arm/src/typekinds.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/library/stdarch/crates/stdarch-gen-arm/src/typekinds.rs b/library/stdarch/crates/stdarch-gen-arm/src/typekinds.rs index bd47ff2bd155..c3aa22294d9f 100644 --- a/library/stdarch/crates/stdarch-gen-arm/src/typekinds.rs +++ b/library/stdarch/crates/stdarch-gen-arm/src/typekinds.rs @@ -289,9 +289,9 @@ pub fn express_reinterpretation_from( ( BaseType::Sized(Float | Int | UInt, _), BaseType::Sized(Float | Int | UInt, _), - ) => Some(FnCall::new_expression( + ) => Some(FnCall::new_unsafe_expression( // Conversions between float and (u)int, or where the lane size changes. - "simd_reinterpret".parse().unwrap(), + "transmute_unchecked".parse().unwrap(), vec![expr.into()], )), _ => None, From c8840791439caf5709b6a7d77cb309a37925b0d8 Mon Sep 17 00:00:00 2001 From: David Wood Date: Thu, 15 Jan 2026 15:53:53 +0000 Subject: [PATCH 05/20] gen-arm: `auto-llvm-sign-conversion` not for `into` Matching the current behaviour for arguments, `auto_llvm_sign_conversion` should only be required for `as_unsigned` conversions, not `into` conversions. --- .../crates/stdarch-gen-arm/src/intrinsic.rs | 33 +++++++++---------- 1 file changed, 16 insertions(+), 17 deletions(-) diff --git a/library/stdarch/crates/stdarch-gen-arm/src/intrinsic.rs b/library/stdarch/crates/stdarch-gen-arm/src/intrinsic.rs index ce427d54b355..e20ab6779cfc 100644 --- a/library/stdarch/crates/stdarch-gen-arm/src/intrinsic.rs +++ b/library/stdarch/crates/stdarch-gen-arm/src/intrinsic.rs @@ -647,27 +647,26 @@ pub fn apply_conversions_to_call( }) .try_collect()?; - let return_type_conversion = if !ctx.global.auto_llvm_sign_conversion { - None - } else { - self.signature - .as_ref() - .and_then(|sig| sig.return_type.as_ref()) - .and_then(|ty| { - if let Some(Sized(Bool, bitsize)) = ty.base_type() { - (*bitsize != 8).then_some(Bool) - } else if let Some(Sized(UInt, _) | Unsized(UInt)) = ty.base_type() { - Some(UInt) - } else { - None - } - }) - }; + let return_type_conversion = self + .signature + .as_ref() + .and_then(|sig| sig.return_type.as_ref()) + .and_then(|ty| { + if let Some(Sized(Bool, bitsize)) = ty.base_type() { + (*bitsize != 8).then_some(Bool) + } else if let Some(Sized(UInt, _) | Unsized(UInt)) = ty.base_type() { + Some(UInt) + } else { + None + } + }); let fn_call = Expression::FnCall(fn_call); match return_type_conversion { Some(Bool) => Ok(convert("into", fn_call)), - Some(UInt) => Ok(convert("as_unsigned", fn_call)), + Some(UInt) if ctx.global.auto_llvm_sign_conversion => { + Ok(convert("as_unsigned", fn_call)) + } _ => Ok(fn_call), } } From 21201482754b7ede778d407435e4656b9d23674b Mon Sep 17 00:00:00 2001 From: David Wood Date: Thu, 15 Jan 2026 15:53:53 +0000 Subject: [PATCH 06/20] core_arch: add `static_assert_range` This is a convenience macro used by the generated SVE intrinsics. Co-authored-by: Jamie Cunliffe Co-authored-by: Luca Vizzarro Co-authored-by: Adam Gemmell Co-authored-by: Jacob Bramley --- library/stdarch/crates/core_arch/src/macros.rs | 16 ++++++++++++++++ .../crates/stdarch-gen-arm/src/context.rs | 4 ++-- 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/library/stdarch/crates/core_arch/src/macros.rs b/library/stdarch/crates/core_arch/src/macros.rs index 00e92428b3e7..83039bc65acc 100644 --- a/library/stdarch/crates/core_arch/src/macros.rs +++ b/library/stdarch/crates/core_arch/src/macros.rs @@ -14,6 +14,22 @@ macro_rules! static_assert { }; } +#[allow(unused_macros)] +macro_rules! static_assert_range { + ($imm:ident, $min:literal..=$max:literal) => { + static_assert!( + $min <= $imm && $imm <= $max, + concat!( + stringify!($imm), + " is not in range ", + stringify!($min), + "-", + stringify!($max), + ) + ) + }; +} + #[allow(unused_macros)] macro_rules! static_assert_uimm_bits { ($imm:ident, $bits:expr) => { diff --git a/library/stdarch/crates/stdarch-gen-arm/src/context.rs b/library/stdarch/crates/stdarch-gen-arm/src/context.rs index 4d02a82b8966..85342a180485 100644 --- a/library/stdarch/crates/stdarch-gen-arm/src/context.rs +++ b/library/stdarch/crates/stdarch-gen-arm/src/context.rs @@ -222,7 +222,7 @@ pub fn make_assertion_from_constraint(&self, constraint: &Constraint) -> Result< } => Ok(Expression::MacroCall( "static_assert_range".to_string(), format!( - "{variable}, {min}, {max}", + "{variable}, {min}..={max}", min = range.start(), max = range.end() ), @@ -250,7 +250,7 @@ pub fn make_assertion_from_constraint(&self, constraint: &Constraint) -> Result< |bitsize| Ok(higher_limit / bitsize - 1))?; Ok(Expression::MacroCall( "static_assert_range".to_string(), - format!("{variable}, 0, {max}"), + format!("{variable}, 0..={max}"), )) } else { Err(format!( From 826ab8ba0ec76ba9ebb81f7fe10733fdb1944f40 Mon Sep 17 00:00:00 2001 From: David Wood Date: Thu, 15 Jan 2026 15:53:53 +0000 Subject: [PATCH 07/20] core_arch: sve types Add the SVE types (without any of the generated intrinsics) and empty modules where the generated intrinsics will be. Enables the `adt_const_params` crate feature that the generated intrinsics will use. Co-authored-by: Jamie Cunliffe Co-authored-by: Luca Vizzarro Co-authored-by: Adam Gemmell Co-authored-by: Jacob Bramley --- .../crates/core_arch/src/aarch64/mod.rs | 8 + .../core_arch/src/aarch64/sve/generated.rs | 1 + .../crates/core_arch/src/aarch64/sve/mod.rs | 379 ++++++++++++++++++ .../core_arch/src/aarch64/sve2/generated.rs | 1 + .../crates/core_arch/src/aarch64/sve2/mod.rs | 17 + library/stdarch/crates/core_arch/src/lib.rs | 3 +- 6 files changed, 408 insertions(+), 1 deletion(-) create mode 100644 library/stdarch/crates/core_arch/src/aarch64/sve/generated.rs create mode 100644 library/stdarch/crates/core_arch/src/aarch64/sve/mod.rs create mode 100644 library/stdarch/crates/core_arch/src/aarch64/sve2/generated.rs create mode 100644 library/stdarch/crates/core_arch/src/aarch64/sve2/mod.rs diff --git a/library/stdarch/crates/core_arch/src/aarch64/mod.rs b/library/stdarch/crates/core_arch/src/aarch64/mod.rs index d7295659c3c9..9376e04b3b53 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/mod.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/mod.rs @@ -25,6 +25,14 @@ #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub use self::neon::*; +mod sve; +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub use self::sve::*; + +mod sve2; +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub use self::sve2::*; + mod prefetch; #[unstable(feature = "stdarch_aarch64_prefetch", issue = "117217")] pub use self::prefetch::*; diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve/generated.rs b/library/stdarch/crates/core_arch/src/aarch64/sve/generated.rs new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/library/stdarch/crates/core_arch/src/aarch64/sve/generated.rs @@ -0,0 +1 @@ + diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve/mod.rs b/library/stdarch/crates/core_arch/src/aarch64/sve/mod.rs new file mode 100644 index 000000000000..a3f70ab61c40 --- /dev/null +++ b/library/stdarch/crates/core_arch/src/aarch64/sve/mod.rs @@ -0,0 +1,379 @@ +//! SVE intrinsics + +#![allow(non_camel_case_types)] + +// `generated.rs` has a `super::*` and this import is for that +use crate::intrinsics::{simd::*, *}; + +#[rustfmt::skip] +mod generated; +#[rustfmt::skip] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub use self::generated::*; + +use crate::{marker::ConstParamTy, mem::transmute}; + +pub(super) trait AsUnsigned { + type Unsigned; + unsafe fn as_unsigned(self) -> Self::Unsigned; +} + +pub(super) trait AsSigned { + type Signed; + unsafe fn as_signed(self) -> Self::Signed; +} + +/// Same as `Into` but with into being unsafe so that it can have the required `target_feature` +pub(super) trait SveInto: Sized { + unsafe fn sve_into(self) -> T; +} + +macro_rules! impl_sve_type { + ($(($v:vis, $elem_type:ty, $name:ident, $elt:literal))*) => ($( + #[doc = concat!("Scalable vector of type ", stringify!($elem_type))] + #[derive(Clone, Copy, Debug)] + #[rustc_scalable_vector($elt)] + #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] + $v struct $name($elem_type); + )*) +} + +macro_rules! impl_sve_tuple_type { + ($(($v:vis, $vec_type:ty, $elt:tt, $name:ident))*) => ($( + impl_sve_tuple_type!(@ ($v, $vec_type, $elt, $name)); + )*); + (@ ($v:vis, $vec_type:ty, 2, $name:ident)) => ( + #[doc = concat!("Two-element tuple of scalable vectors of type ", stringify!($vec_type))] + #[derive(Clone, Copy, Debug)] + #[rustc_scalable_vector] + #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] + $v struct $name($vec_type, $vec_type); + ); + (@ ($v:vis, $vec_type:ty, 3, $name:ident)) => ( + #[doc = concat!("Three-element tuple of scalable vectors of type ", stringify!($vec_type))] + #[derive(Clone, Copy, Debug)] + #[rustc_scalable_vector] + #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] + $v struct $name($vec_type, $vec_type, $vec_type); + ); + (@ ($v:vis, $vec_type:ty, 4, $name:ident)) => ( + #[doc = concat!("Four-element tuple of scalable vectors of type ", stringify!($vec_type))] + #[derive(Clone, Copy, Debug)] + #[rustc_scalable_vector] + #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] + $v struct $name($vec_type, $vec_type, $vec_type, $vec_type); + ); +} + +macro_rules! impl_sign_conversions_sv { + ($(($signed:ty, $unsigned:ty))*) => ($( + impl AsUnsigned for $signed { + type Unsigned = $unsigned; + + #[inline] + #[target_feature(enable = "sve")] + unsafe fn as_unsigned(self) -> $unsigned { + transmute_unchecked(self) + } + } + + impl AsSigned for $unsigned { + type Signed = $signed; + + #[inline] + #[target_feature(enable = "sve")] + unsafe fn as_signed(self) -> $signed { + transmute_unchecked(self) + } + } + )*) +} + +macro_rules! impl_sign_conversions { + ($(($signed:ty, $unsigned:ty))*) => ($( + impl AsUnsigned for $signed { + type Unsigned = $unsigned; + + #[inline] + #[target_feature(enable = "sve")] + unsafe fn as_unsigned(self) -> $unsigned { + transmute(self) + } + } + + impl AsSigned for $unsigned { + type Signed = $signed; + + #[inline] + #[target_feature(enable = "sve")] + unsafe fn as_signed(self) -> $signed { + transmute(self) + } + } + )*) +} + +/// LLVM requires the predicate lane count to be the same as the lane count +/// it's working with. However the ACLE only defines one bool type and the +/// instruction set doesn't have this distinction. As a result we have to +/// create these internal types so we can match the LLVM signature. Each of +/// these internal types can be converted to the public `svbool_t` type and +/// the `svbool_t` type can be converted into these. +macro_rules! impl_internal_sve_predicate { + ($(($name:ident, $elt:literal))*) => ($( + impl_sve_type! { + (pub(super), bool, $name, $elt) + } + + impl SveInto for $name { + #[inline] + #[target_feature(enable = "sve")] + unsafe fn sve_into(self) -> svbool_t { + #[allow(improper_ctypes)] + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = concat!("llvm.aarch64.sve.convert.to.svbool.nxv", $elt, "i1") + )] + fn convert_to_svbool(b: $name) -> svbool_t; + } + unsafe { convert_to_svbool(self) } + } + } + + #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] + impl SveInto<$name> for svbool_t { + #[inline] + #[target_feature(enable = "sve")] + unsafe fn sve_into(self) -> $name { + #[allow(improper_ctypes)] + unsafe extern "C" { + #[cfg_attr( + target_arch = "aarch64", + link_name = concat!("llvm.aarch64.sve.convert.from.svbool.nxv", $elt, "i1") + )] + fn convert_from_svbool(b: svbool_t) -> $name; + } + unsafe { convert_from_svbool(self) } + } + } + )*) +} + +impl_sve_type! { + (pub, bool, svbool_t, 16) + + (pub, i8, svint8_t, 16) + (pub, u8, svuint8_t, 16) + + (pub, i16, svint16_t, 8) + (pub, u16, svuint16_t, 8) + (pub, f32, svfloat32_t, 4) + (pub, i32, svint32_t, 4) + (pub, u32, svuint32_t, 4) + (pub, f64, svfloat64_t, 2) + (pub, i64, svint64_t, 2) + (pub, u64, svuint64_t, 2) + + // Internal types: + (pub(super), i8, nxv2i8, 2) + (pub(super), i8, nxv4i8, 4) + (pub(super), i8, nxv8i8, 8) + + (pub(super), i16, nxv2i16, 2) + (pub(super), i16, nxv4i16, 4) + + (pub(super), i32, nxv2i32, 2) + + (pub(super), u8, nxv2u8, 2) + (pub(super), u8, nxv4u8, 4) + (pub(super), u8, nxv8u8, 8) + + (pub(super), u16, nxv2u16, 2) + (pub(super), u16, nxv4u16, 4) + + (pub(super), u32, nxv2u32, 2) +} + +impl_sve_tuple_type! { + (pub, svint8_t, 2, svint8x2_t) + (pub, svuint8_t, 2, svuint8x2_t) + (pub, svint16_t, 2, svint16x2_t) + (pub, svuint16_t, 2, svuint16x2_t) + (pub, svfloat32_t, 2, svfloat32x2_t) + (pub, svint32_t, 2, svint32x2_t) + (pub, svuint32_t, 2, svuint32x2_t) + (pub, svfloat64_t, 2, svfloat64x2_t) + (pub, svint64_t, 2, svint64x2_t) + (pub, svuint64_t, 2, svuint64x2_t) + + (pub, svint8_t, 3, svint8x3_t) + (pub, svuint8_t, 3, svuint8x3_t) + (pub, svint16_t, 3, svint16x3_t) + (pub, svuint16_t, 3, svuint16x3_t) + (pub, svfloat32_t, 3, svfloat32x3_t) + (pub, svint32_t, 3, svint32x3_t) + (pub, svuint32_t, 3, svuint32x3_t) + (pub, svfloat64_t, 3, svfloat64x3_t) + (pub, svint64_t, 3, svint64x3_t) + (pub, svuint64_t, 3, svuint64x3_t) + + (pub, svint8_t, 4, svint8x4_t) + (pub, svuint8_t, 4, svuint8x4_t) + (pub, svint16_t, 4, svint16x4_t) + (pub, svuint16_t, 4, svuint16x4_t) + (pub, svfloat32_t, 4, svfloat32x4_t) + (pub, svint32_t, 4, svint32x4_t) + (pub, svuint32_t, 4, svuint32x4_t) + (pub, svfloat64_t, 4, svfloat64x4_t) + (pub, svint64_t, 4, svint64x4_t) + (pub, svuint64_t, 4, svuint64x4_t) +} + +impl_sign_conversions! { + (i8, u8) + (i16, u16) + (i32, u32) + (i64, u64) + (*const i8, *const u8) + (*const i16, *const u16) + (*const i32, *const u32) + (*const i64, *const u64) + (*mut i8, *mut u8) + (*mut i16, *mut u16) + (*mut i32, *mut u32) + (*mut i64, *mut u64) +} + +impl_sign_conversions_sv! { + (svint8_t, svuint8_t) + (svint16_t, svuint16_t) + (svint32_t, svuint32_t) + (svint64_t, svuint64_t) + + (svint8x2_t, svuint8x2_t) + (svint16x2_t, svuint16x2_t) + (svint32x2_t, svuint32x2_t) + (svint64x2_t, svuint64x2_t) + + (svint8x3_t, svuint8x3_t) + (svint16x3_t, svuint16x3_t) + (svint32x3_t, svuint32x3_t) + (svint64x3_t, svuint64x3_t) + + (svint8x4_t, svuint8x4_t) + (svint16x4_t, svuint16x4_t) + (svint32x4_t, svuint32x4_t) + (svint64x4_t, svuint64x4_t) + + // Internal types: + (nxv2i8, nxv2u8) + (nxv4i8, nxv4u8) + (nxv8i8, nxv8u8) + + (nxv2i16, nxv2u16) + (nxv4i16, nxv4u16) + + (nxv2i32, nxv2u32) +} + +impl_internal_sve_predicate! { + (svbool2_t, 2) + (svbool4_t, 4) + (svbool8_t, 8) +} + +/// Patterns returned by a `PTRUE` +#[repr(i32)] +#[allow(non_camel_case_types)] +#[derive(Clone, Copy, Debug, PartialEq, Eq, ConstParamTy)] +#[non_exhaustive] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub enum svpattern { + /// Activate the largest power-of-two number of elements that is less than the vector length + SV_POW2 = 0, + /// Activate the first element + SV_VL1 = 1, + /// Activate the first two elements + SV_VL2 = 2, + /// Activate the first three elements + SV_VL3 = 3, + /// Activate the first four elements + SV_VL4 = 4, + /// Activate the first five elements + SV_VL5 = 5, + /// Activate the first six elements + SV_VL6 = 6, + /// Activate the first seven elements + SV_VL7 = 7, + /// Activate the first eight elements + SV_VL8 = 8, + /// Activate the first sixteen elements + SV_VL16 = 9, + /// Activate the first thirty-two elements + SV_VL32 = 10, + /// Activate the first sixty-four elements + SV_VL64 = 11, + /// Activate the first one-hundred-and-twenty-eight elements + SV_VL128 = 12, + /// Activate the first two-hundred-and-fifty-six elements + SV_VL256 = 13, + /// Activate the largest multiple-of-four number of elements that is less than the vector length + SV_MUL4 = 29, + /// Activate the largest multiple-of-three number of elements that is less than the vector + /// length + SV_MUL3 = 30, + /// Activate all elements + SV_ALL = 31, +} + +/// Addressing mode for prefetch intrinsics - allows the specification of the expected access +/// kind (read or write), the cache level to load the data, the data retention policy +/// (temporal or streaming) +#[repr(i32)] +#[allow(non_camel_case_types)] +#[derive(Clone, Copy, Debug, PartialEq, Eq, ConstParamTy)] +#[non_exhaustive] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub enum svprfop { + /// Temporal fetch of the addressed location for reading to the L1 cache (i.e. allocate in + /// cache normally) + SV_PLDL1KEEP = 0, + /// Streaming fetch of the addressed location for reading to the L1 cache (i.e. memory only + /// used once) + SV_PLDL1STRM = 1, + /// Temporal fetch of the addressed location for reading to the L2 cache (i.e. allocate in + /// cache normally) + SV_PLDL2KEEP = 2, + /// Streaming fetch of the addressed location for reading to the L2 cache (i.e. memory only + /// used once) + SV_PLDL2STRM = 3, + /// Temporal fetch of the addressed location for reading to the L3 cache (i.e. allocate in + /// cache normally) + SV_PLDL3KEEP = 4, + /// Streaming fetch of the addressed location for reading to the L3 cache (i.e. memory only + /// used once) + SV_PLDL3STRM = 5, + /// Temporal fetch of the addressed location for writing to the L1 cache (i.e. allocate in + /// cache normally) + SV_PSTL1KEEP = 8, + /// Temporal fetch of the addressed location for writing to the L1 cache (i.e. memory only + /// used once) + SV_PSTL1STRM = 9, + /// Temporal fetch of the addressed location for writing to the L2 cache (i.e. allocate in + /// cache normally) + SV_PSTL2KEEP = 10, + /// Temporal fetch of the addressed location for writing to the L2 cache (i.e. memory only + /// used once) + SV_PSTL2STRM = 11, + /// Temporal fetch of the addressed location for writing to the L3 cache (i.e. allocate in + /// cache normally) + SV_PSTL3KEEP = 12, + /// Temporal fetch of the addressed location for writing to the L3 cache (i.e. memory only + /// used once) + SV_PSTL3STRM = 13, +} + +#[cfg(test)] +#[path = "ld_st_tests_aarch64.rs"] +mod ld_st_tests; diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve2/generated.rs b/library/stdarch/crates/core_arch/src/aarch64/sve2/generated.rs new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/library/stdarch/crates/core_arch/src/aarch64/sve2/generated.rs @@ -0,0 +1 @@ + diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve2/mod.rs b/library/stdarch/crates/core_arch/src/aarch64/sve2/mod.rs new file mode 100644 index 000000000000..acf907021457 --- /dev/null +++ b/library/stdarch/crates/core_arch/src/aarch64/sve2/mod.rs @@ -0,0 +1,17 @@ +//! SVE2 intrinsics + +#![allow(non_camel_case_types)] + +// `generated.rs` has a `super::*` and this import is for that +use super::sve::*; +use crate::intrinsics::*; + +#[rustfmt::skip] +mod generated; +#[rustfmt::skip] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub use self::generated::*; + +#[cfg(test)] +#[path = "ld_st_tests_aarch64.rs"] +mod ld_st_tests; diff --git a/library/stdarch/crates/core_arch/src/lib.rs b/library/stdarch/crates/core_arch/src/lib.rs index 9255994e5ee8..f2f19eba2670 100644 --- a/library/stdarch/crates/core_arch/src/lib.rs +++ b/library/stdarch/crates/core_arch/src/lib.rs @@ -40,7 +40,8 @@ const_cmp, const_eval_select, maybe_uninit_as_bytes, - movrs_target_feature + movrs_target_feature, + min_adt_const_params )] #![cfg_attr(test, feature(test, abi_vectorcall, stdarch_internal))] #![deny(clippy::missing_inline_in_public_items)] From 78ccc9277080f2dfec7003801aa2362d2ccdab0f Mon Sep 17 00:00:00 2001 From: David Wood Date: Fri, 16 Jan 2026 12:30:36 +0000 Subject: [PATCH 08/20] gen-arm: use `sve_into` instead of `into` `Into::into` can't be used here because the implementations can't have the required target feature, so `SveInto` needs to be introduced and written by the generator --- library/stdarch/crates/stdarch-gen-arm/src/intrinsic.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/library/stdarch/crates/stdarch-gen-arm/src/intrinsic.rs b/library/stdarch/crates/stdarch-gen-arm/src/intrinsic.rs index e20ab6779cfc..18a638a0390b 100644 --- a/library/stdarch/crates/stdarch-gen-arm/src/intrinsic.rs +++ b/library/stdarch/crates/stdarch-gen-arm/src/intrinsic.rs @@ -630,7 +630,7 @@ pub fn apply_conversions_to_call( match (scope, kind.base_type()) { (Argument, Some(Sized(Bool, bitsize))) if *bitsize != 8 => { - Ok(convert("into", arg)) + Ok(convert("sve_into", arg)) } (Argument, Some(Sized(UInt, _) | Unsized(UInt))) => { if ctx.global.auto_llvm_sign_conversion { @@ -663,7 +663,7 @@ pub fn apply_conversions_to_call( let fn_call = Expression::FnCall(fn_call); match return_type_conversion { - Some(Bool) => Ok(convert("into", fn_call)), + Some(Bool) => Ok(convert("sve_into", fn_call)), Some(UInt) if ctx.global.auto_llvm_sign_conversion => { Ok(convert("as_unsigned", fn_call)) } From a7d4530a985d688aca5d0c003df46a1fca05cd66 Mon Sep 17 00:00:00 2001 From: David Wood Date: Wed, 4 Mar 2026 14:16:40 +0000 Subject: [PATCH 09/20] gen-arm: correct renamed `from_exposed_addr` link `core::ptr::from_exposed_addr` was renamed to `core::ptr::with_exposed_provenance` and so this link needs updated. --- library/stdarch/crates/stdarch-gen-arm/src/intrinsic.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/library/stdarch/crates/stdarch-gen-arm/src/intrinsic.rs b/library/stdarch/crates/stdarch-gen-arm/src/intrinsic.rs index 18a638a0390b..5d38d45ca690 100644 --- a/library/stdarch/crates/stdarch-gen-arm/src/intrinsic.rs +++ b/library/stdarch/crates/stdarch-gen-arm/src/intrinsic.rs @@ -871,8 +871,8 @@ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { Self::NoProvenance(arg) => write!( f, "Addresses passed in `{arg}` lack provenance, so this is similar to using a \ - `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before \ - using it." + `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane \ + before using it." ), Self::UnpredictableOnFault => write!( f, From ca5032f50fc5acf936f64ab252ae97ecff8c0c60 Mon Sep 17 00:00:00 2001 From: David Wood Date: Thu, 15 Jan 2026 15:53:53 +0000 Subject: [PATCH 10/20] gen-arm: add sve intrinsic definitions Thousands of lines of SVE intrinsic definitions.. Co-authored-by: Jamie Cunliffe Co-authored-by: Luca Vizzarro Co-authored-by: Adam Gemmell Co-authored-by: Jacob Bramley --- .../stdarch-gen-arm/spec/sve/aarch64.spec.yml | 5199 +++++++++++++++++ .../spec/sve2/aarch64.spec.yml | 3196 ++++++++++ 2 files changed, 8395 insertions(+) create mode 100644 library/stdarch/crates/stdarch-gen-arm/spec/sve/aarch64.spec.yml create mode 100644 library/stdarch/crates/stdarch-gen-arm/spec/sve2/aarch64.spec.yml diff --git a/library/stdarch/crates/stdarch-gen-arm/spec/sve/aarch64.spec.yml b/library/stdarch/crates/stdarch-gen-arm/spec/sve/aarch64.spec.yml new file mode 100644 index 000000000000..1fad8bb371f9 --- /dev/null +++ b/library/stdarch/crates/stdarch-gen-arm/spec/sve/aarch64.spec.yml @@ -0,0 +1,5199 @@ +arch_cfgs: + - arch_name: aarch64 + target_feature: [sve] + llvm_prefix: llvm.aarch64.sve + +uses_neon_types: true +auto_llvm_sign_conversion: true +generate_load_store_tests: true + +# `#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]` +sve-unstable: &sve-unstable + FnCall: [unstable, ['feature = "stdarch_aarch64_sve"', 'issue= "145052"']] + +intrinsics: + - name: svacge[{_n}_{type}] + attr: [*sve-unstable] + doc: Absolute compare greater than or equal to + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{predicate}" + types: [f32, f64] + assert_instr: [facge] + n_variant_op: op2 + compose: + - LLVMLink: { name: "facge.{sve_type}" } + + - name: svacgt[{_n}_{type}] + attr: [*sve-unstable] + doc: Absolute compare greater than + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{predicate}" + types: [f32, f64] + assert_instr: [facgt] + n_variant_op: op2 + compose: + - LLVMLink: { name: "facgt.{sve_type}" } + + - name: svacle[{_n}_{type}] + attr: [*sve-unstable] + doc: Absolute compare less than or equal to + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{predicate}" + types: [f32, f64] + assert_instr: [facge] + n_variant_op: op2 + compose: + - FnCall: ["svacge_{type}", [$pg, $op2, $op1]] + + - name: svaclt[{_n}_{type}] + attr: [*sve-unstable] + doc: Absolute compare less than + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{predicate}" + types: [f32, f64] + assert_instr: [facgt] + n_variant_op: op2 + compose: + - FnCall: ["svacgt_{type}", [$pg, $op2, $op1]] + + - name: svcadd[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Complex add with rotate + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64] + static_defs: ["const IMM_ROTATION: i32"] + constraints: [{ variable: IMM_ROTATION, any_values: [90, 270] }] + assert_instr: [[fcadd, "IMM_ROTATION = 90"]] + zeroing_method: { select: op1 } + compose: + - LLVMLink: + name: fcadd.{sve_type} + arguments: + - "pg: {predicate}" + - "op1: {sve_type}" + - "op2: {sve_type}" + - "imm_rotation: i32" + - FnCall: ["{llvm_link}", [$pg, $op1, $op2, $IMM_ROTATION]] + + - name: svcmla[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Complex multiply-add with rotate + arguments: + - "pg: {predicate}" + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + return_type: "{sve_type}" + types: [f32, f64] + static_defs: ["const IMM_ROTATION: i32"] + constraints: [{ variable: IMM_ROTATION, any_values: [0, 90, 180, 270] }] + assert_instr: [[fcmla, "IMM_ROTATION = 90"]] + zeroing_method: { select: op1 } + compose: + - LLVMLink: + name: fcmla.{sve_type} + arguments: + - "pg: {predicate}" + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + - "imm_rotation: i32" + - FnCall: ["{llvm_link}", [$pg, $op1, $op2, $op3, $IMM_ROTATION]] + + - name: svcmla_lane[_{type}] + attr: [*sve-unstable] + doc: Complex multiply-add with rotate + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + types: [f32] + static_defs: ["const IMM_INDEX: i32", "const IMM_ROTATION: i32"] + constraints: + - variable: IMM_INDEX + range: { match_size: "{type}", default: [0, 1], halfword: [0, 3] } + - { variable: IMM_ROTATION, any_values: [0, 90, 180, 270] } + assert_instr: [[fcmla, "IMM_INDEX = 0, IMM_ROTATION = 90"]] + compose: + - LLVMLink: + name: fcmla.lane.x.{sve_type} + arguments: + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + - "imm_index: i32" + - "imm_rotation: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX, $IMM_ROTATION]] + + - name: svadd[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Add + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + zeroing_method: { select: op1 } + assert_instr: ["{type_kind.f}add"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.f}add.{sve_type}" } + + - name: svqsub[{_n}_{type}] + attr: [*sve-unstable] + doc: Saturating subtract + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["{type_kind.su}qsub"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.su}qsub.x.{sve_type}" } + + - name: svcnt[_{type[0]}]{_mxz} + attr: [*sve-unstable] + doc: Count nonzero bits + arguments: + ["inactive: {sve_type[1]}", "pg: {predicate[0]}", "op: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [f32, u32] + - [f64, u64] + - [i8, u8] + - [i16, u16] + - [i32, u32] + - [i64, u64] + - [u8, u8] + - [u16, u16] + - [u32, u32] + - [u64, u64] + zeroing_method: { drop: inactive } + assert_instr: [cnt] + compose: + - LLVMLink: { name: "cnt.{sve_type[0]}" } + + - name: svcls[_{type[0]}]{_mxz} + attr: [*sve-unstable] + doc: Count leading sign bits + arguments: + ["inactive: {sve_type[1]}", "pg: {predicate[0]}", "op: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: [[i8, u8], [i16, u16], [i32, u32], [i64, u64]] + zeroing_method: { drop: inactive } + assert_instr: [cls] + compose: + - LLVMLink: { name: "cls.{sve_type[0]}" } + + - name: svclz[_{type[0]}]{_mxz} + attr: [*sve-unstable] + doc: Count leading zero bits + arguments: + ["inactive: {sve_type[1]}", "pg: {predicate[0]}", "op: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [i8, u8] + - [i16, u16] + - [i32, u32] + - [i64, u64] + - [u8, u8] + - [u16, u16] + - [u32, u32] + - [u64, u64] + zeroing_method: { drop: inactive } + assert_instr: [clz] + compose: + - LLVMLink: { name: "clz.{sve_type[0]}" } + + - name: svext{size_literal[1]}[_{type[0]}]{_mxz} + attr: [*sve-unstable] + substitutions: + sign_or_zero: + match_kind: "{type[0]}" + default: Sign + unsigned: Zero + kind_literal: { match_kind: "{type[0]}", default: s, unsigned: u } + doc: "{sign_or_zero}-extend the low {size[1]} bits" + arguments: + ["inactive: {sve_type[0]}", "pg: {predicate[0]}", "op: {sve_type[0]}"] + return_type: "{sve_type[0]}" + types: + - [[i16, i32, i64, u16, u32, u64], i8] + - [[i32, i64, u32, u64], i16] + - [[i64, u64], i32] + zeroing_method: { drop: inactive } + assert_instr: ["{type_kind[0].su}xt{size_literal[1]}"] + compose: + - LLVMLink: + name: "{type_kind[0].su}xt{size_literal[1]}.{sve_type[0]}" + + - name: svsqrt[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Square root + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64] + zeroing_method: { drop: inactive } + assert_instr: [fsqrt] + compose: + - LLVMLink: { name: "fsqrt.{sve_type}" } + + - name: svcmpeq[{_n}_{type}] + attr: [*sve-unstable] + doc: Compare equal to + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{predicate}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [{ float: fcmeq, default: cmpeq }] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.f}cmpeq.{sve_type}" } + + - name: svcmpeq_wide[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Compare equal to + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{predicate[0]}" + types: + - [[i8, i16, i32], i64] + assert_instr: [cmpeq] + n_variant_op: op2 + compose: + - LLVMLink: { name: "cmpeq.wide.{sve_type[0]}" } + + - name: svcmpge[{_n}_{type}] + attr: [*sve-unstable] + doc: Compare greater than or equal to + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{predicate}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [{ float: fcmge, default: cmpge, unsigned: cmphs }] + n_variant_op: op2 + compose: + - MatchKind: + - "{type}" + - default: + LLVMLink: { name: "{type_kind.f}cmpge.{sve_type}" } + unsigned: + LLVMLink: { name: "cmphs.{sve_type}" } + + - name: svcmpge_wide[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Compare greater than or equal to + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{predicate[0]}" + n_variant_op: op2 + types: + - [[i8, i16, i32], i64] + - [[u8, u16, u32], u64] + assert_instr: [{ default: cmpge, unsigned: cmphs }] + compose: + - MatchKind: + - "{type[0]}" + - default: + LLVMLink: { name: "cmpge.wide.{sve_type[0]}" } + unsigned: + LLVMLink: { name: "cmphs.wide.{sve_type[0]}" } + + - name: svcmpgt[{_n}_{type}] + attr: [*sve-unstable] + doc: Compare greater than + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{predicate}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [{ float: fcmgt, default: cmpgt, unsigned: cmphi }] + n_variant_op: op2 + compose: + - MatchKind: + - "{type}" + - default: + LLVMLink: { name: "{type_kind.f}cmpgt.{sve_type}" } + unsigned: + LLVMLink: { name: "cmphi.{sve_type}" } + + - name: svcmpgt_wide[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Compare greater than + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{predicate[0]}" + types: + - [[i8, i16, i32], i64] + - [[u8, u16, u32], u64] + assert_instr: [{ default: cmpgt, unsigned: cmphi }] + n_variant_op: op2 + compose: + - MatchKind: + - "{type[0]}" + - default: + LLVMLink: { name: "cmpgt.wide.{sve_type[0]}" } + unsigned: + LLVMLink: { name: "cmphi.wide.{sve_type[0]}" } + + - name: svcmple[{_n}_{type}] + attr: [*sve-unstable] + doc: Compare less than or equal to + arguments: ["pg: svbool_t", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "svbool_t" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [{ float: fcmge, default: cmpge, unsigned: cmphs }] + n_variant_op: op2 + compose: + - FnCall: ["svcmpge_{type}", [$pg, $op2, $op1]] + + - name: svcmple_wide[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Compare less than or equal to + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{predicate[0]}" + types: + - [[i8, i16, i32], i64] + - [[u8, u16, u32], u64] + assert_instr: [{ default: cmple, unsigned: cmpls }] + n_variant_op: op2 + compose: + - MatchKind: + - "{type[0]}" + - default: + LLVMLink: { name: "cmple.wide.{sve_type[0]}" } + unsigned: + LLVMLink: { name: "cmpls.wide.{sve_type[0]}" } + + - name: svcmplt[{_n}_{type}] + attr: [*sve-unstable] + doc: Compare less than + arguments: ["pg: svbool_t", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "svbool_t" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [{ float: fcmgt, default: cmpgt, unsigned: cmphi }] + n_variant_op: op2 + compose: + - FnCall: ["svcmpgt_{type}", [$pg, $op2, $op1]] + + - name: svcmplt_wide[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Compare less than + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{predicate[0]}" + types: + - [[i8, i16, i32], i64] + - [[u8, u16, u32], u64] + assert_instr: [{ default: cmplt, unsigned: cmplo }] + n_variant_op: op2 + compose: + - MatchKind: + - "{type[0]}" + - default: + LLVMLink: { name: "cmplt.wide.{sve_type[0]}" } + unsigned: + LLVMLink: { name: "cmplo.wide.{sve_type[0]}" } + + - name: svcmpne[{_n}_{type}] + attr: [*sve-unstable] + doc: Compare not equal to + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{predicate}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [{ float: fcmne, default: cmpne }] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.f}cmpne.{sve_type}" } + + - name: svcmpne_wide[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Compare not equal to + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{predicate[0]}" + types: [[[i8, i16, i32], i64]] + assert_instr: [cmpne] + n_variant_op: op2 + compose: + - LLVMLink: { name: "cmpne.wide.{sve_type[0]}" } + + - name: svcmpuo[{_n}_{type}] + attr: [*sve-unstable] + doc: Compare unordered with + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{predicate}" + types: [f32, f64] + assert_instr: [fcmuo] + n_variant_op: op2 + compose: + - LLVMLink: { name: "fcmpuo.{sve_type}" } + + - name: svcnt{size_literal} + attr: [*sve-unstable] + doc: Count the number of {size}-bit elements in a vector + arguments: [] + return_type: u64 + types: [i8, i16, i32, i64] + assert_instr: + - default: { byte: rdvl, halfword: cnth, default: cntw, doubleword: cntd } + compose: + - FnCall: ["svcnt{size_literal}_pat", [], ["{{ svpattern::SV_ALL }}"]] + + - name: svcnt{size_literal}_pat + attr: [*sve-unstable] + doc: Count the number of {size}-bit elements in a vector + arguments: [] + static_defs: ["const PATTERN: svpattern"] + return_type: u64 + assert_instr: + - [rdvl, "PATTERN = {{ svpattern::SV_ALL }}"] + - ["cnt{size_literal}", "PATTERN = {{ svpattern::SV_MUL4 }}"] + types: [i8] + compose: + - LLVMLink: + name: cnt{size_literal} + arguments: ["pattern: svpattern"] + - FnCall: ["{llvm_link}", [$PATTERN]] + + - name: svcnt{size_literal}_pat + attr: [*sve-unstable] + doc: Count the number of {size}-bit elements in a vector + arguments: [] + static_defs: ["const PATTERN: svpattern"] + return_type: u64 + assert_instr: [["cnt{size_literal}", "PATTERN = {{ svpattern::SV_ALL }}"]] + types: [i16, i32, i64] + compose: + - LLVMLink: + name: cnt{size_literal} + arguments: ["pattern: svpattern"] + - FnCall: ["{llvm_link}", [$PATTERN]] + + - name: svlen[_{type}] + attr: [*sve-unstable] + doc: Count the number of elements in a full vector + arguments: ["_op: {sve_type}"] + return_type: "u64" + types: [i8, u8, i16, u16, i32, u32, f32, i64, u64, f64] + assert_instr: [{ default: { default: "cnt{size_literal}", byte: rdvl } }] + compose: + - FnCall: ["svcnt{size_literal}", []] + + - name: svdup[_n]_{type} + attr: [*sve-unstable] + doc: Broadcast a scalar value + arguments: ["op: {type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [mov] + compose: + - LLVMLink: { name: "dup.x.{sve_type}" } + + - name: svdup[_n]_{type}{_mxz} + attr: [*sve-unstable] + doc: Broadcast a scalar value + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + zeroing_method: { drop: inactive } + assert_instr: [mov] + compose: + - LLVMLink: { name: "dup.{sve_type}" } + + - name: svdup[_n]_{type} + attr: [*sve-unstable] + doc: Broadcast a scalar value + arguments: ["op: bool"] + return_type: "{predicate}" + types: [b8, b16, b32, b64] + assert_instr: [sbfx, whilelo] + compose: + - LLVMLink: { name: "dup.x.{sve_type}" } + + - name: svdup_lane[_{type[0]}] + attr: [*sve-unstable] + doc: Broadcast a scalar value + arguments: ["data: {sve_type[0]}", "index: {type[1]}"] + return_type: "{sve_type[0]}" + types: + - [f32, u32] + - [f64, u64] + - [i8, u8] + - [i16, u16] + - [i32, u32] + - [i64, u64] + - [u8, u8] + - [u16, u16] + - [u32, u32] + - [u64, u64] + assert_instr: [tbl] + compose: + - FnCall: + - svtbl_{type[0]} + - - $data + - FnCall: ["svdup_n_{type[1]}", [$index]] + + - name: svdupq_lane[_{type}] + attr: [*sve-unstable] + doc: Broadcast a quadword of scalars + arguments: ["data: {sve_type}", "index: u64"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [tbl] + compose: + - LLVMLink: { name: "dupq.lane.{sve_type}" } + + - name: svdupq[_n]_{type} + attr: [*sve-unstable] + doc: Broadcast a quadword of scalars + arguments: + - "x0: {type}" + - "x1: {type}" + - "x2: {type}" + - "x3: {type}" + - "x4: {type}" + - "x5: {type}" + - "x6: {type}" + - "x7: {type}" + - "x8: {type}" + - "x9: {type}" + - "x10: {type}" + - "x11: {type}" + - "x12: {type}" + - "x13: {type}" + - "x14: {type}" + - "x15: {type}" + return_type: "{sve_type}" + types: [i8, u8] + assert_instr: [] + compose: + - LLVMLink: + name: llvm.experimental.vector.insert.{sve_type}.{neon_type} + arguments: ["op0: {sve_type}", "op1: {neon_type}", "idx: i64"] + - Let: + - op + - FnCall: + - "{llvm_link}" + - - FnCall: ["svundef_{type}", [], [], true] + - FnCall: + - "crate::mem::transmute" + - - - $x0 + - $x1 + - $x2 + - $x3 + - $x4 + - $x5 + - $x6 + - $x7 + - $x8 + - $x9 + - $x10 + - $x11 + - $x12 + - $x13 + - $x14 + - $x15 + - 0 + - FnCall: ["svdupq_lane_{type}", [$op, 0]] + + - name: svdupq[_n]_{type} + attr: [*sve-unstable] + doc: Broadcast a quadword of scalars + types: [b8] + arguments: + - "x0: bool" + - "x1: bool" + - "x2: bool" + - "x3: bool" + - "x4: bool" + - "x5: bool" + - "x6: bool" + - "x7: bool" + - "x8: bool" + - "x9: bool" + - "x10: bool" + - "x11: bool" + - "x12: bool" + - "x13: bool" + - "x14: bool" + - "x15: bool" + return_type: "svbool_t" + assert_instr: [] + compose: + - Let: + - op1 + - FnCall: + - svdupq_n_s8 + - - CastAs: [$x0, i8] + - CastAs: [$x1, i8] + - CastAs: [$x2, i8] + - CastAs: [$x3, i8] + - CastAs: [$x4, i8] + - CastAs: [$x5, i8] + - CastAs: [$x6, i8] + - CastAs: [$x7, i8] + - CastAs: [$x8, i8] + - CastAs: [$x9, i8] + - CastAs: [$x10, i8] + - CastAs: [$x11, i8] + - CastAs: [$x12, i8] + - CastAs: [$x13, i8] + - CastAs: [$x14, i8] + - CastAs: [$x15, i8] + - FnCall: + - svcmpne_wide_s8 + - - FnCall: [svptrue_b8, []] + - $op1 + - FnCall: [svdup_n_s64, [0]] + + - name: svdupq[_n]_{type} + attr: [*sve-unstable] + doc: Broadcast a quadword of scalars + arguments: + - "x0: {type}" + - "x1: {type}" + - "x2: {type}" + - "x3: {type}" + - "x4: {type}" + - "x5: {type}" + - "x6: {type}" + - "x7: {type}" + return_type: "{sve_type}" + types: [i16, u16] + assert_instr: [] + compose: + - LLVMLink: + name: llvm.experimental.vector.insert.{sve_type}.{neon_type} + arguments: ["op0: {sve_type}", "op1: {neon_type}", "idx: i64"] + - Let: + - op + - FnCall: + - "{llvm_link}" + - - FnCall: ["svundef_{type}", [], [], true] + - FnCall: + - "crate::mem::transmute" + - - [$x0, $x1, $x2, $x3, $x4, $x5, $x6, $x7] + - 0 + - FnCall: ["svdupq_lane_{type}", [$op, 0]] + + - name: svdupq[_n]_{type} + attr: [*sve-unstable] + doc: Broadcast a quadword of scalars + types: [b16] + arguments: + - "x0: bool" + - "x1: bool" + - "x2: bool" + - "x3: bool" + - "x4: bool" + - "x5: bool" + - "x6: bool" + - "x7: bool" + return_type: svbool_t + assert_instr: [] + compose: + - Let: + - op1 + - FnCall: + - svdupq_n_s16 + - - CastAs: [$x0, i16] + - CastAs: [$x1, i16] + - CastAs: [$x2, i16] + - CastAs: [$x3, i16] + - CastAs: [$x4, i16] + - CastAs: [$x5, i16] + - CastAs: [$x6, i16] + - CastAs: [$x7, i16] + - FnCall: + - svcmpne_wide_s16 + - - FnCall: [svptrue_b16, []] + - $op1 + - FnCall: [svdup_n_s64, [0]] + + - name: svdupq[_n]_{type} + attr: [*sve-unstable] + doc: Broadcast a quadword of scalars + arguments: ["x0: {type}", "x1: {type}", "x2: {type}", "x3: {type}"] + return_type: "{sve_type}" + types: [f32, i32, u32] + assert_instr: [] + compose: + - LLVMLink: + name: llvm.experimental.vector.insert.{sve_type}.{neon_type} + arguments: ["op0: {sve_type}", "op1: {neon_type}", "idx: i64"] + - Let: + - op + - FnCall: + - "{llvm_link}" + - - FnCall: ["svundef_{type}", [], [], true] + - FnCall: ["crate::mem::transmute", [[$x0, $x1, $x2, $x3]]] + - 0 + - FnCall: ["svdupq_lane_{type}", [$op, 0]] + + - name: svdupq[_n]_{type} + attr: [*sve-unstable] + doc: Broadcast a quadword of scalars + types: [b32] + arguments: ["x0: bool", "x1: bool", "x2: bool", "x3: bool"] + return_type: "svbool_t" + assert_instr: [] + compose: + - Let: + - op1 + - FnCall: + - svdupq_n_s32 + - - CastAs: [$x0, i32] + - CastAs: [$x1, i32] + - CastAs: [$x2, i32] + - CastAs: [$x3, i32] + - FnCall: + - svcmpne_wide_s32 + - - FnCall: [svptrue_b32, []] + - $op1 + - FnCall: [svdup_n_s64, [0]] + + - name: svdupq[_n]_{type} + attr: [*sve-unstable] + doc: Broadcast a quadword of scalars + arguments: ["x0: {type}", "x1: {type}"] + return_type: "{sve_type}" + types: [f64, i64, u64] + assert_instr: [] + compose: + - LLVMLink: + name: llvm.experimental.vector.insert.{sve_type}.{neon_type} + arguments: ["op0: {sve_type}", "op1: {neon_type}", "idx: i64"] + - Let: + - op + - FnCall: + - "{llvm_link}" + - - FnCall: ["svundef_{type}", [], [], true] + - FnCall: ["crate::mem::transmute", [[$x0, $x1]]] + - 0 + - FnCall: ["svdupq_lane_{type}", [$op, 0]] + + - name: svdupq[_n]_{type} + attr: [*sve-unstable] + doc: Broadcast a quadword of scalars + types: [b64] + arguments: ["x0: bool", "x1: bool"] + return_type: "svbool_t" + assert_instr: [] + compose: + - Let: + - op1 + - FnCall: [svdupq_n_s64, [CastAs: [$x0, i64], CastAs: [$x1, i64]]] + - FnCall: + - svcmpne_s64 + - - FnCall: [svptrue_b64, []] + - $op1 + - FnCall: [svdup_n_s64, [0]] + + - name: svcreate2[_{type}] + attr: [*sve-unstable] + doc: Create a tuple of two vectors + arguments: ["x0: {sve_type}", "x1: {sve_type}"] + return_type: "{sve_type_x2}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [] + compose: + - FnCall: ["crate::intrinsics::simd::scalable::sve_tuple_create2", [$x0, $x1], [], true] + + - name: svcreate3[_{type}] + attr: [*sve-unstable] + doc: Create a tuple of three vectors + arguments: ["x0: {sve_type}", "x1: {sve_type}", "x2: {sve_type}"] + return_type: "{sve_type_x3}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [] + compose: + - FnCall: ["crate::intrinsics::simd::scalable::sve_tuple_create3", [$x0, $x1, $x2], [], true] + + - name: svcreate4[_{type}] + attr: [*sve-unstable] + doc: Create a tuple of four vectors + arguments: + ["x0: {sve_type}", "x1: {sve_type}", "x2: {sve_type}", "x3: {sve_type}"] + return_type: "{sve_type_x4}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [] + compose: + - FnCall: ["crate::intrinsics::simd::scalable::sve_tuple_create4", [$x0, $x1, $x2, $x3], [], true] + + - name: svundef_{type} + attr: [*sve-unstable] + safety: + unsafe: [uninitialized] + doc: Create an uninitialized vector + arguments: [] + return_type: "{sve_type}" + types: [i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [] + compose: + - FnCall: ["svdup_n_{type}", ["0"]] + + - name: svundef_{type} + attr: [*sve-unstable] + safety: + unsafe: [uninitialized] + doc: Create an uninitialized vector + arguments: [] + return_type: "{sve_type}" + types: [f32, f64] + assert_instr: [] + compose: + - FnCall: ["svdup_n_{type}", ["0{type}"]] + + - name: svundef2_{type} + attr: [*sve-unstable] + safety: + unsafe: [uninitialized] + doc: Create an uninitialized tuple of two vectors + arguments: [] + return_type: "{sve_type_x2}" + types: [i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [] + compose: + - FnCall: + - "svcreate2_{type}" + - - FnCall: ["svdup_n_{type}", ["0"]] + - FnCall: ["svdup_n_{type}", ["0"]] + + - name: svundef2_{type} + attr: [*sve-unstable] + safety: + unsafe: [uninitialized] + doc: Create an uninitialized tuple of two vectors + arguments: [] + return_type: "{sve_type_x2}" + types: [f32, f64] + assert_instr: [] + compose: + - FnCall: + - "svcreate2_{type}" + - - FnCall: ["svdup_n_{type}", ["0{type}"]] + - FnCall: ["svdup_n_{type}", ["0{type}"]] + + - name: svundef3_{type} + attr: [*sve-unstable] + safety: + unsafe: [uninitialized] + doc: Create an uninitialized tuple of three vectors + arguments: [] + return_type: "{sve_type_x3}" + types: [i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [] + compose: + - FnCall: + - "svcreate3_{type}" + - - FnCall: ["svdup_n_{type}", ["0"]] + - FnCall: ["svdup_n_{type}", ["0"]] + - FnCall: ["svdup_n_{type}", ["0"]] + + - name: svundef3_{type} + attr: [*sve-unstable] + safety: + unsafe: [uninitialized] + doc: Create an uninitialized tuple of three vectors + arguments: [] + return_type: "{sve_type_x3}" + types: [f32, f64] + assert_instr: [] + compose: + - FnCall: + - "svcreate3_{type}" + - - FnCall: ["svdup_n_{type}", ["0{type}"]] + - FnCall: ["svdup_n_{type}", ["0{type}"]] + - FnCall: ["svdup_n_{type}", ["0{type}"]] + + - name: svundef4_{type} + attr: [*sve-unstable] + safety: + unsafe: [uninitialized] + doc: Create an uninitialized tuple of four vectors + arguments: [] + return_type: "{sve_type_x4}" + types: [i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [] + compose: + - FnCall: + - "svcreate4_{type}" + - - FnCall: ["svdup_n_{type}", ["0"]] + - FnCall: ["svdup_n_{type}", ["0"]] + - FnCall: ["svdup_n_{type}", ["0"]] + - FnCall: ["svdup_n_{type}", ["0"]] + + - name: svundef4_{type} + attr: [*sve-unstable] + safety: + unsafe: [uninitialized] + doc: Create an uninitialized tuple of four vectors + arguments: [] + return_type: "{sve_type_x4}" + types: [f32, f64] + assert_instr: [] + compose: + - FnCall: + - "svcreate4_{type}" + - - FnCall: ["svdup_n_{type}", ["0{type}"]] + - FnCall: ["svdup_n_{type}", ["0{type}"]] + - FnCall: ["svdup_n_{type}", ["0{type}"]] + - FnCall: ["svdup_n_{type}", ["0{type}"]] + + - name: svindex_{type} + attr: [*sve-unstable] + doc: Create linear series + arguments: ["base: {type}", "step: {type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [index] + compose: + - LLVMLink: { name: "index.{sve_type}" } + + - name: svget2[_{type}] + attr: [*sve-unstable] + doc: Extract one vector from a tuple of two vectors + arguments: ["tuple: {sve_type_x2}"] + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, range: [0, 1] }] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [] + compose: + - FnCall: ["crate::intrinsics::simd::scalable::sve_tuple_get", [$tuple], ["_", "_", "{{IMM_INDEX}}"], true] + + - name: svget3[_{type}] + attr: [*sve-unstable] + doc: Extract one vector from a tuple of three vectors + arguments: ["tuple: {sve_type_x3}"] + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, range: [0, 2] }] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [] + compose: + - FnCall: ["crate::intrinsics::simd::scalable::sve_tuple_get", [$tuple], ["_", "_", "{{IMM_INDEX}}"], true] + + - name: svget4[_{type}] + attr: [*sve-unstable] + doc: Extract one vector from a tuple of four vectors + arguments: ["tuple: {sve_type_x4}"] + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, range: [0, 3] }] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [] + compose: + - FnCall: ["crate::intrinsics::simd::scalable::sve_tuple_get", [$tuple], ["_", "_", "{{IMM_INDEX}}"], true] + + - name: svset2[_{type}] + attr: [*sve-unstable] + doc: Change one vector in a tuple of two vectors + arguments: ["tuple: {sve_type_x2}", "x: {sve_type}"] + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, range: [0, 1] }] + return_type: "{sve_type_x2}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [] + compose: + - FnCall: ["crate::intrinsics::simd::scalable::sve_tuple_set", [$tuple, $x], ["_", "_", "{{IMM_INDEX}}"], true] + + - name: svset3[_{type}] + attr: [*sve-unstable] + doc: Change one vector in a tuple of three vectors + arguments: ["tuple: {sve_type_x3}", "x: {sve_type}"] + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, range: [0, 2] }] + return_type: "{sve_type_x3}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [] + compose: + - FnCall: ["crate::intrinsics::simd::scalable::sve_tuple_set", [$tuple, $x], ["_", "_", "{{IMM_INDEX}}"], true] + + - name: svset4[_{type}] + attr: [*sve-unstable] + doc: Change one vector in a tuple of four vectors + arguments: ["tuple: {sve_type_x4}", "x: {sve_type}"] + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, range: [0, 3] }] + return_type: "{sve_type_x4}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [] + compose: + - FnCall: ["crate::intrinsics::simd::scalable::sve_tuple_set", [$tuple, $x], ["_", "_", "{{IMM_INDEX}}"], true] + + - name: svzip1[_{type}] + attr: [*sve-unstable] + doc: Interleave elements from low halves of two inputs + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [zip1] + compose: + - LLVMLink: { name: "zip1.{sve_type}" } + + - name: svzip1_{type} + attr: [*sve-unstable] + doc: Interleave elements from low halves of two inputs + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [b8, b16, b32, b64] + assert_instr: [zip1] + compose: + - LLVMLink: { name: "zip1.{sve_type}" } + + - name: svzip1q[_{type}] + attr: [*sve-unstable] + doc: Interleave quadwords from low halves of two inputs + target_features: [f64mm] + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [zip1] + compose: + - LLVMLink: { name: "zip1q.{sve_type}" } + + - name: svzip2[_{type}] + attr: [*sve-unstable] + doc: Interleave elements from high halves of two inputs + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [zip2] + compose: + - LLVMLink: { name: "zip2.{sve_type}" } + + - name: svzip2_{type} + attr: [*sve-unstable] + doc: Interleave elements from high halves of two inputs + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [b8, b16, b32, b64] + assert_instr: [zip2] + compose: + - LLVMLink: { name: "zip2.{sve_type}" } + + - name: svzip2q[_{type}] + attr: [*sve-unstable] + doc: Interleave quadwords from high halves of two inputs + target_features: [f64mm] + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [zip2] + compose: + - LLVMLink: { name: "zip2q.{sve_type}" } + + - name: svuzp1[_{type}] + attr: [*sve-unstable] + doc: Concatenate even elements from two inputs + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [uzp1] + compose: + - LLVMLink: { name: "uzp1.{sve_type}" } + + - name: svuzp1_{type} + attr: [*sve-unstable] + doc: Concatenate even elements from two inputs + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [b8, b16, b32, b64] + assert_instr: [uzp1] + compose: + - LLVMLink: { name: "uzp1.{sve_type}" } + + - name: svuzp1q[_{type}] + attr: [*sve-unstable] + doc: Concatenate even quadwords from two inputs + target_features: [f64mm] + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [uzp1] + compose: + - LLVMLink: { name: "uzp1q.{sve_type}" } + + - name: svuzp2[_{type}] + attr: [*sve-unstable] + doc: Concatenate odd elements from two inputs + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [uzp2] + compose: + - LLVMLink: { name: "uzp2.{sve_type}" } + + - name: svuzp2_{type} + attr: [*sve-unstable] + doc: Concatenate odd elements from two inputs + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [b8, b16, b32, b64] + assert_instr: [uzp2] + compose: + - LLVMLink: { name: "uzp2.{sve_type}" } + + - name: svuzp2q[_{type}] + attr: [*sve-unstable] + doc: Concatenate odd quadwords from two inputs + target_features: [f64mm] + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [uzp2] + compose: + - LLVMLink: { name: "uzp2q.{sve_type}" } + + - name: svtrn1[_{type}] + attr: [*sve-unstable] + doc: Interleave even elements from two inputs + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [trn1] + compose: + - LLVMLink: { name: "trn1.{sve_type}" } + + - name: svtrn1_{type} + attr: [*sve-unstable] + doc: Interleave even elements from two inputs + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [b8, b16, b32, b64] + assert_instr: [trn1] + compose: + - LLVMLink: { name: "trn1.{sve_type}" } + + - name: svtrn1q[_{type}] + attr: [*sve-unstable] + doc: Interleave even quadwords from two inputs + target_features: [f64mm] + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [trn1] + compose: + - LLVMLink: { name: "trn1q.{sve_type}" } + + - name: svtrn2[_{type}] + attr: [*sve-unstable] + doc: Interleave odd elements from two inputs + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [trn2] + compose: + - LLVMLink: { name: "trn2.{sve_type}" } + + - name: svtrn2_{type} + attr: [*sve-unstable] + doc: Interleave odd elements from two inputs + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [b8, b16, b32, b64] + assert_instr: [trn2] + compose: + - LLVMLink: { name: "trn2.{sve_type}" } + + - name: svtrn2q[_{type}] + attr: [*sve-unstable] + doc: Interleave odd quadwords from two inputs + target_features: [f64mm] + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [trn2] + compose: + - LLVMLink: { name: "trn2q.{sve_type}" } + + - name: svrev[_{type}] + attr: [*sve-unstable] + doc: Reverse all elements + arguments: ["op: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [rev] + compose: + - LLVMLink: { name: "rev.{sve_type}" } + + - name: svrev_{type} + attr: [*sve-unstable] + doc: Reverse all elements + arguments: ["op: {sve_type}"] + return_type: "{sve_type}" + types: [b8, b16, b32, b64] + assert_instr: [rev] + compose: + - LLVMLink: { name: "rev.{sve_type}" } + + - name: svrevb[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Reverse bytes within elements + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: [i16, i32, i64, u16, u32, u64] + zeroing_method: { drop: "inactive" } + assert_instr: [revb] + compose: + - LLVMLink: { name: "revb.{sve_type}" } + + - name: svrevh[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Reverse halfwords within elements + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: [i32, i64, u32, u64] + zeroing_method: { drop: "inactive" } + assert_instr: [revh] + compose: + - LLVMLink: { name: "revh.{sve_type}" } + + - name: svrevw[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Reverse words within elements + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: [i64, u64] + zeroing_method: { drop: "inactive" } + assert_instr: [revw] + compose: + - LLVMLink: { name: "revw.{sve_type}" } + + - name: svrbit[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Reverse bits + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64, u8, u16, u32, u64] + zeroing_method: { drop: "inactive" } + assert_instr: [rbit] + compose: + - LLVMLink: { name: "rbit.{sve_type}" } + + - name: svext[_{type}] + attr: [*sve-unstable] + doc: Extract vector from pair of vectors + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + static_defs: ["const IMM3: i32"] + constraints: [{ variable: IMM3, sve_max_elems_type: "{type}" }] + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [[ext, "IMM3 = 1"]] + compose: + - LLVMLink: + name: ext.{sve_type} + arguments: ["op1: {sve_type}", "op2: {sve_type}", "imm3: i32"] + - FnCall: ["{llvm_link}", [$op1, $op2, $IMM3]] + + - name: svsplice[_{type}] + attr: [*sve-unstable] + doc: Splice two vectors under predicate control + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [splice] + compose: + - LLVMLink: { name: "splice.{sve_type}" } + + - name: svinsr[_n_{type}] + attr: [*sve-unstable] + doc: Insert scalar in shifted vector + arguments: ["op1: {sve_type}", "op2: {type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [insr] + compose: + - LLVMLink: { name: "insr.{sve_type}" } + + - name: svld1[_{type}] + attr: [*sve-unstable] + doc: Unextended load + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: ["pg: {predicate}", "base: *{type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["ld1{size_literal}"] + test: { load: 0 } + compose: + - LLVMLink: { name: "ld1.{sve_type}" } + + - name: svld1_vnum[_{type}] + attr: [*sve-unstable] + doc: Unextended load + safety: + unsafe: + - pointer_offset_vnum: predicated + - dereference: predicated + arguments: ["pg: {predicate}", "base: *{type}", "vnum: i64"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["ld1{size_literal}"] + test: { load: 0 } + compose: + - FnCall: + - "svld1_{type}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: [{ FnCall: ["svcnt{size_literal}", []] }, isize] + - CastAs: [$vnum, isize] + + - name: svld1_gather_[{type[0]}]index[_{type[1]}] + attr: [*sve-unstable] + doc: Unextended load + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: + ["pg: {predicate[0]}", "base: *{type[1]}", "indices: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [[i32, u32], [f32, i32, u32]] + - [[i64, u64], [f64, i64, u64]] + assert_instr: ["ld1{size_literal[0]}"] + test: { load: 1 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "ld1.gather.{type_kind[0].su}xtw.index.{sve_type[1]}" + doubleword: + LLVMLink: + name: "ld1.gather.index.{sve_type[1]}" + + - name: svld1_gather_[{type[0]}]offset[_{type[1]}] + attr: [*sve-unstable] + doc: Unextended load + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: + ["pg: {predicate[0]}", "base: *{type[1]}", "offsets: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [[i32, u32], [f32, i32, u32]] + - [[i64, u64], [f64, i64, u64]] + assert_instr: ["ld1{size_literal[0]}"] + test: { load: 1 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "ld1.gather.{type_kind[0].su}xtw.{sve_type[1]}" + doubleword: + LLVMLink: + name: "ld1.gather.{sve_type[1]}" + + - name: svld1_gather[_{type[0]}base]_offset_{type[1]} + attr: [*sve-unstable] + doc: Unextended load + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "offset: i64"] + return_type: "{sve_type[1]}" + types: + - [u32, [f32, i32, u32]] + - [u64, [f64, i64, u64]] + assert_instr: ["ld1{size_literal[0]}"] + test: { load: 1 } + compose: + - LLVMLink: + name: "ld1.gather.scalar.offset.{sve_type[1]}.{sve_type[0]}" + + - name: svld1_gather[_{type[0]}base]_{type[1]} + attr: [*sve-unstable] + doc: Unextended load + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [u32, [f32, i32, u32]] + - [u64, [f64, i64, u64]] + assert_instr: ["ld1{size_literal[0]}"] + test: { load: 1 } + compose: + - FnCall: + - "svld1_gather_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - 0 + + - name: svld1_gather[_{type[0]}base]_index_{type[1]} + attr: [*sve-unstable] + doc: Unextended load + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "index: i64"] + return_type: "{sve_type[1]}" + types: + - [u32, [f32, i32, u32]] + - [u64, [f64, i64, u64]] + assert_instr: ["ld1{size_literal[0]}"] + test: { load: 1 } + compose: + - FnCall: + - "svld1_gather_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - MethodCall: [$index, unchecked_shl, ["{size_in_bytes_log2[0]}"]] + + - name: svld1s{size_literal[2]}_gather_[{type[0]}]index_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and sign-extend + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: + ["pg: {predicate[0]}", "base: *{type[2]}", "indices: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [[i32, u32], [i32, u32], i16] + - [[i64, u64], [i64, u64], [i16, i32]] + assert_instr: ["ld1s{size_literal[2]}"] + test: { load: 2 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "ld1.gather.{type_kind[0].su}xtw.index.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + doubleword: + LLVMLink: + name: "ld1.gather.index.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + - FnCall: + - crate::intrinsics::simd::simd_cast + - - FnCall: ["{llvm_link}", [$pg, $base, $indices]] + + - name: svld1u{size_literal[2]}_gather_[{type[0]}]index_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and zero-extend + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: + ["pg: {predicate[0]}", "base: *{type[2]}", "indices: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [[i32, u32], [u32, i32], u16] + - [[i64, u64], [u64, i64], [u16, u32]] + assert_instr: ["ld1{size_literal[2]}"] + test: { load: 2 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "ld1.gather.{type_kind[0].su}xtw.index.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + doubleword: + LLVMLink: + name: "ld1.gather.index.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + - FnCall: + - crate::intrinsics::simd::simd_cast + - - FnCall: ["{llvm_link}", [$pg, $base, $indices]] + - - Type: "{sve_type[1] as {type[2]}}" + - _ + + - name: svld1s{size_literal[2]}_gather_[{type[0]}]offset_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and sign-extend + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: + ["pg: {predicate[0]}", "base: *{type[2]}", "offsets: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [[i32, u32], [i32, u32], [i8, i16]] + - [[i64, u64], [i64, u64], [i8, i16, i32]] + assert_instr: ["ld1s{size_literal[2]}"] + test: { load: 2 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "ld1.gather.{type_kind[0].su}xtw.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + doubleword: + LLVMLink: + name: "ld1.gather.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + - FnCall: + - crate::intrinsics::simd::simd_cast + - - FnCall: ["{llvm_link}", [$pg, $base, $offsets]] + + - name: svld1u{size_literal[2]}_gather_[{type[0]}]offset_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and zero-extend + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: + ["pg: {predicate[0]}", "base: *{type[2]}", "offsets: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [[i32, u32], [u32, i32], [u8, u16]] + - [[i64, u64], [u64, i64], [u8, u16, u32]] + assert_instr: ["ld1{size_literal[2]}"] + test: { load: 2 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "ld1.gather.{type_kind[0].su}xtw.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + doubleword: + LLVMLink: + name: "ld1.gather.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + - FnCall: + - crate::intrinsics::simd::simd_cast + - - FnCall: ["{llvm_link}", [$pg, $base, $offsets]] + - - Type: "{sve_type[1] as {type[2]}}" + - _ + + - name: svld1s{size_literal[2]}_gather[_{type[0]}base]_offset_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and sign-extend + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "offset: i64"] + return_type: "{sve_type[1]}" + types: + - [u32, [i32, u32], [i8, i16]] + - [u64, [i64, u64], [i8, i16, i32]] + assert_instr: ["ld1s{size_literal[2]}"] + test: { load: 2 } + compose: + - LLVMLink: + name: "ld1.gather.scalar.offset.{sve_type[1] as {type[2]}}.{sve_type[0]}" + return_type: "{sve_type[1] as {type[2]}}" + - FnCall: + - crate::intrinsics::simd::simd_cast + - - FnCall: ["{llvm_link}", [$pg, $bases, $offset]] + + - name: svld1u{size_literal[2]}_gather[_{type[0]}base]_offset_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and zero-extend + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "offset: i64"] + return_type: "{sve_type[1]}" + types: + - [u32, [u32, i32], [u8, u16]] + - [u64, [u64, i64], [u8, u16, u32]] + assert_instr: ["ld1{size_literal[2]}"] + test: { load: 2 } + compose: + - LLVMLink: + name: "ld1.gather.scalar.offset.{sve_type[1] as {type[2]}}.{sve_type[0]}" + return_type: "{sve_type[1] as {type[2]}}" + - FnCall: + - crate::intrinsics::simd::simd_cast + - - FnCall: ["{llvm_link}", [$pg, $bases, $offset]] + - - Type: "{sve_type[1] as {type[2]}}" + - _ + + - name: svld1s{size_literal[2]}_gather[_{type[0]}base]_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and sign-extend + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [u32, [i32, u32], [i8, i16]] + - [u64, [i64, u64], [i8, i16, i32]] + assert_instr: ["ld1s{size_literal[2]}"] + test: { load: 2 } + compose: + - FnCall: + - "svld1s{size_literal[2]}_gather_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - 0 + + - name: svld1u{size_literal[2]}_gather[_{type[0]}base]_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and zero-extend + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [u32, [i32, u32], [u8, u16]] + - [u64, [i64, u64], [u8, u16, u32]] + assert_instr: ["ld1{size_literal[2]}"] + test: { load: 2 } + compose: + - FnCall: + - "svld1u{size_literal[2]}_gather_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - 0 + + - name: svld1s{size_literal[2]}_gather[_{type[0]}base]_index_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and sign-extend + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "index: i64"] + return_type: "{sve_type[1]}" + types: + - [u32, [i32, u32], i16] + - [u64, [i64, u64], [i16, i32]] + assert_instr: ["ld1s{size_literal[2]}"] + test: { load: 2 } + compose: + - FnCall: + - "svld1s{size_literal[2]}_gather_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - MethodCall: [$index, unchecked_shl, ["{size_in_bytes_log2[2]}"]] + + - name: svld1u{size_literal[2]}_gather[_{type[0]}base]_index_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and zero-extend + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "index: i64"] + return_type: "{sve_type[1]}" + types: + - [u32, [i32, u32], u16] + - [u64, [i64, u64], [u16, u32]] + assert_instr: ["ld1{size_literal[2]}"] + test: { load: 2 } + compose: + - FnCall: + - "svld1u{size_literal[2]}_gather_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - MethodCall: [$index, unchecked_shl, ["{size_in_bytes_log2[2]}"]] + + - name: svldnt1[_{type}] + attr: [*sve-unstable] + doc: Unextended load, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - non_temporal + arguments: ["pg: {predicate}", "base: *{type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["ldnt1{size_literal}"] + test: { load: 0 } + compose: + - LLVMLink: { name: "ldnt1.{sve_type}" } + + - name: svldnt1_vnum[_{type}] + attr: [*sve-unstable] + doc: Unextended load, non-temporal + safety: + unsafe: + - pointer_offset_vnum: predicated + - dereference: predicated + - non_temporal + arguments: ["pg: {predicate}", "base: *{type}", "vnum: i64"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["ldnt1{size_literal}"] + test: { load: 0 } + compose: + - FnCall: + - "svldnt1_{type}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: [{ FnCall: ["svcnt{size_literal}", []] }, isize] + - CastAs: [$vnum, isize] + + - name: svld1s{size_literal[1]}_{type[0]} + attr: [*sve-unstable] + doc: Load {size[1]}-bit data and sign-extend + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: ["pg: {predicate[0]}", "base: *{type[1]}"] + return_type: "{sve_type[0]}" + types: + - [[i16, i32, i64, u16, u32, u64], i8] + - [[i32, i64, u32, u64], i16] + - [[i64, u64], i32] + assert_instr: ["ld1s{size_literal[1]}"] + test: { load: 1 } + compose: + - LLVMLink: + name: "ld1.{sve_type[0] as {type[1]}}" + arguments: ["pg: {predicate[0]}", "base: *{type[1]}"] + return_type: "{sve_type[0] as {type[1]}}" + - FnCall: + - "crate::intrinsics::simd::simd_cast" + - - FnCall: ["{llvm_link}", [$pg, $base]] + + - name: svld1u{size_literal[1]}_{type[0]} + attr: [*sve-unstable] + doc: Load {size[1]}-bit data and zero-extend + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: ["pg: {predicate[0]}", "base: *{type[1]}"] + return_type: "{sve_type[0]}" + types: + - [[i16, i32, i64, u16, u32, u64], u8] + - [[i32, i64, u32, u64], u16] + - [[i64, u64], u32] + assert_instr: ["ld1{size_literal[1]}"] + test: { load: 1 } + compose: + - LLVMLink: + name: "ld1.{sve_type[0] as {type[1]}}" + arguments: ["pg: {predicate[0]}", "base: *{type[1]}"] + return_type: "{sve_type[0] as {type[1]}}" + - FnCall: + - "crate::intrinsics::simd::simd_cast" + - - FnCall: ["{llvm_link}", [$pg, $base]] + - [Type: "{sve_type[0] as {type[1]}}", _] + + - name: svld1s{size_literal[1]}_vnum_{type[0]} + attr: [*sve-unstable] + doc: Load {size[1]}-bit data and sign-extend + safety: + unsafe: + - pointer_offset_vnum: predicated + - dereference: predicated + arguments: ["pg: {predicate[0]}", "base: *{type[1]}", "vnum: i64"] + return_type: "{sve_type[0]}" + types: + - [[i16, i32, i64, u16, u32, u64], i8] + - [[i32, i64, u32, u64], i16] + - [[i64, u64], i32] + assert_instr: ["ld1s{size_literal[1]}"] + test: { load: 1 } + compose: + - FnCall: + - "svld1s{size_literal[1]}_{type[0]}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: + [{ FnCall: ["svcnt{size_literal[0]}", []] }, isize] + - CastAs: [$vnum, isize] + + - name: svld1u{size_literal[1]}_vnum_{type[0]} + attr: [*sve-unstable] + doc: Load {size[1]}-bit data and zero-extend + safety: + unsafe: + - pointer_offset_vnum: predicated + - dereference: predicated + arguments: ["pg: {predicate[0]}", "base: *{type[1]}", "vnum: i64"] + return_type: "{sve_type[0]}" + types: + - [[i16, i32, i64, u16, u32, u64], u8] + - [[i32, i64, u32, u64], u16] + - [[i64, u64], u32] + assert_instr: ["ld1{size_literal[1]}"] + test: { load: 1 } + compose: + - FnCall: + - "svld1u{size_literal[1]}_{type[0]}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: + [{ FnCall: ["svcnt{size_literal[0]}", []] }, isize] + - CastAs: [$vnum, isize] + + - name: svld2[_{type}] + attr: [*sve-unstable] + doc: Load two-element tuples into two vectors + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: ["pg: {predicate}", "base: *{type}"] + return_type: "{sve_type_x2}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["ld2{size_literal}"] + test: { load: 0 } + compose: + - LLVMLink: { name: "ld2.sret.{sve_type}" } + + - name: svld2_vnum[_{type}] + attr: [*sve-unstable] + doc: Load two-element tuples into two vectors + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: ["pg: {predicate}", "base: *{type}", "vnum: i64"] + return_type: "{sve_type_x2}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["ld2{size_literal}"] + test: { load: 0 } + compose: + - FnCall: + - "svld2_{type}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: [{ FnCall: ["svcnt{size_literal}", []] }, isize] + - CastAs: [$vnum, isize] + + - name: svld3[_{type}] + attr: [*sve-unstable] + doc: Load three-element tuples into three vectors + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: ["pg: {predicate}", "base: *{type}"] + return_type: "{sve_type_x3}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["ld3{size_literal}"] + test: { load: 0 } + compose: + - LLVMLink: { name: "ld3.sret.{sve_type}" } + + - name: svld3_vnum[_{type}] + attr: [*sve-unstable] + doc: Load three-element tuples into three vectors + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: ["pg: {predicate}", "base: *{type}", "vnum: i64"] + return_type: "{sve_type_x3}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["ld3{size_literal}"] + test: { load: 0 } + compose: + - FnCall: + - "svld3_{type}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: [{ FnCall: ["svcnt{size_literal}", []] }, isize] + - CastAs: [$vnum, isize] + + - name: svld4[_{type}] + attr: [*sve-unstable] + doc: Load four-element tuples into four vectors + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: ["pg: {predicate}", "base: *{type}"] + return_type: "{sve_type_x4}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["ld4{size_literal}"] + test: { load: 0 } + compose: + - LLVMLink: { name: "ld4.sret.{sve_type}" } + + - name: svld4_vnum[_{type}] + attr: [*sve-unstable] + doc: Load four-element tuples into four vectors + safety: + unsafe: + - pointer_offset_vnum: predicated + - dereference: predicated + arguments: ["pg: {predicate}", "base: *{type}", "vnum: i64"] + return_type: "{sve_type_x4}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["ld4{size_literal}"] + test: { load: 0 } + compose: + - FnCall: + - "svld4_{type}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: [{ FnCall: ["svcnt{size_literal}", []] }, isize] + - CastAs: [$vnum, isize] + + - name: svld1rq[_{type}] + attr: [*sve-unstable] + doc: Load and replicate 128 bits of data + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: ["pg: {predicate}", "base: *{type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["ld1rq{size_literal}"] + test: { load: 0 } + compose: + - LLVMLink: { name: "ld1rq.{sve_type}" } + + - name: svld1ro[_{type}] + attr: [*sve-unstable] + doc: Load and replicate 256 bits of data + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + target_features: [f64mm] + arguments: ["pg: {predicate}", "base: *{type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["ld1ro{size_literal}"] + test: { load: 0 } + compose: + - LLVMLink: { name: "ld1ro.{sve_type}" } + + - name: svldnf1[_{type}] + attr: [*sve-unstable] + doc: Unextended load, non-faulting + safety: + unsafe: + - pointer_offset: predicated_non_faulting + - dereference: predicated_non_faulting + - unpredictable_on_fault + arguments: ["pg: {predicate}", "base: *{type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["ldnf1{size_literal}"] + test: { load: 0 } + compose: + - LLVMLink: { name: "ldnf1.{sve_type}" } + + - name: svldnf1_vnum[_{type}] + attr: [*sve-unstable] + doc: Unextended load, non-faulting + safety: + unsafe: + - pointer_offset_vnum: predicated_non_faulting + - dereference: predicated_non_faulting + - unpredictable_on_fault + arguments: ["pg: {predicate}", "base: *{type}", "vnum: i64"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["ldnf1{size_literal}"] + test: { load: 0 } + compose: + - FnCall: + - "svldnf1_{type}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: [{ FnCall: ["svcnt{size_literal}", []] }, isize] + - CastAs: [$vnum, isize] + + - name: svldnf1s{size_literal[1]}_{type[0]} + attr: [*sve-unstable] + doc: Load {size[1]}-bit data and sign-extend, non-faulting + safety: + unsafe: + - pointer_offset: predicated_non_faulting + - dereference: predicated_non_faulting + - unpredictable_on_fault + arguments: ["pg: {predicate[0]}", "base: *{type[1]}"] + return_type: "{sve_type[0]}" + types: + - [[i16, i32, i64, u16, u32, u64], i8] + - [[i32, i64, u32, u64], i16] + - [[i64, u64], i32] + assert_instr: ["ldnf1s{size_literal[1]}"] + test: { load: 1 } + compose: + - LLVMLink: + name: "ldnf1.{sve_type[0] as {type[1]}}" + arguments: ["pg: {predicate[0]}", "base: *{type[1]}"] + return_type: "{sve_type[0] as {type[1]}}" + - FnCall: + - "crate::intrinsics::simd::simd_cast" + - - FnCall: ["{llvm_link}", [$pg, $base]] + + - name: svldnf1u{size_literal[1]}_{type[0]} + attr: [*sve-unstable] + doc: Load {size[1]}-bit data and zero-extend, non-faulting + safety: + unsafe: + - pointer_offset: predicated_non_faulting + - dereference: predicated_non_faulting + - unpredictable_on_fault + arguments: ["pg: {predicate[0]}", "base: *{type[1]}"] + return_type: "{sve_type[0]}" + types: + - [[i16, i32, i64, u16, u32, u64], u8] + - [[i32, i64, u32, u64], u16] + - [[i64, u64], u32] + assert_instr: ["ldnf1{size_literal[1]}"] + test: { load: 1 } + compose: + - LLVMLink: + name: "ldnf1.{sve_type[0] as {type[1]}}" + arguments: ["pg: {predicate[0]}", "base: *{type[1]}"] + return_type: "{sve_type[0] as {type[1]}}" + - FnCall: + - "crate::intrinsics::simd::simd_cast" + - - FnCall: ["{llvm_link}", [$pg, $base]] + - [Type: "{sve_type[0] as {type[1]}}", _] + + - name: svldnf1s{size_literal[1]}_vnum_{type[0]} + attr: [*sve-unstable] + doc: Load {size[1]}-bit data and sign-extend, non-faulting + safety: + unsafe: + - pointer_offset_vnum: predicated_non_faulting + - dereference: predicated_non_faulting + - unpredictable_on_fault + arguments: ["pg: {predicate[0]}", "base: *{type[1]}", "vnum: i64"] + return_type: "{sve_type[0]}" + types: + - [[i16, i32, i64, u16, u32, u64], i8] + - [[i32, i64, u32, u64], i16] + - [[i64, u64], i32] + assert_instr: ["ldnf1s{size_literal[1]}"] + test: { load: 1 } + compose: + - FnCall: + - "svldnf1s{size_literal[1]}_{type[0]}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: + [{ FnCall: ["svcnt{size_literal[0]}", []] }, isize] + - CastAs: [$vnum, isize] + + - name: svldnf1u{size_literal[1]}_vnum_{type[0]} + attr: [*sve-unstable] + doc: Load {size[1]}-bit data and zero-extend, non-faulting + safety: + unsafe: + - pointer_offset_vnum: predicated_non_faulting + - dereference: predicated_non_faulting + - unpredictable_on_fault + arguments: ["pg: {predicate[0]}", "base: *{type[1]}", "vnum: i64"] + return_type: "{sve_type[0]}" + types: + - [[i16, i32, i64, u16, u32, u64], u8] + - [[i32, i64, u32, u64], u16] + - [[i64, u64], u32] + assert_instr: ["ldnf1{size_literal[1]}"] + test: { load: 1 } + compose: + - FnCall: + - "svldnf1u{size_literal[1]}_{type[0]}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: + [{ FnCall: ["svcnt{size_literal[0]}", []] }, isize] + - CastAs: [$vnum, isize] + + - name: svldff1[_{type}] + attr: [*sve-unstable] + doc: Unextended load, first-faulting + safety: + unsafe: + - pointer_offset: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + arguments: ["pg: {predicate}", "base: *{type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["ldff1{size_literal}"] + test: { load: 0 } + compose: + - LLVMLink: { name: "ldff1.{sve_type}" } + + - name: svldff1_vnum[_{type}] + attr: [*sve-unstable] + doc: Unextended load, first-faulting + safety: + unsafe: + - pointer_offset_vnum: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + arguments: ["pg: {predicate}", "base: *{type}", "vnum: i64"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["ldff1{size_literal}"] + test: { load: 0 } + compose: + - FnCall: + - "svldff1_{type}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: [{ FnCall: ["svcnt{size_literal}", []] }, isize] + - CastAs: [$vnum, isize] + + - name: svldff1s{size_literal[1]}_{type[0]} + attr: [*sve-unstable] + doc: Load {size[1]}-bit data and sign-extend, first-faulting + safety: + unsafe: + - pointer_offset: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + arguments: ["pg: {predicate[0]}", "base: *{type[1]}"] + return_type: "{sve_type[0]}" + types: + - [[i16, i32, i64, u16, u32, u64], i8] + - [[i32, i64, u32, u64], i16] + - [[i64, u64], i32] + assert_instr: ["ldff1s{size_literal[1]}"] + test: { load: 1 } + compose: + - LLVMLink: + name: "ldff1.{sve_type[0] as {type[1]}}" + arguments: ["pg: {predicate[0]}", "base: *{type[1]}"] + return_type: "{sve_type[0] as {type[1]}}" + - FnCall: + - "crate::intrinsics::simd::simd_cast" + - - FnCall: ["{llvm_link}", [$pg, $base]] + + - name: svldff1u{size_literal[1]}_{type[0]} + attr: [*sve-unstable] + doc: Load {size[1]}-bit data and zero-extend, first-faulting + safety: + unsafe: + - pointer_offset: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + arguments: ["pg: {predicate[0]}", "base: *{type[1]}"] + return_type: "{sve_type[0]}" + types: + - [[i16, i32, i64, u16, u32, u64], u8] + - [[i32, i64, u32, u64], u16] + - [[i64, u64], u32] + assert_instr: ["ldff1{size_literal[1]}"] + test: { load: 1 } + compose: + - LLVMLink: + name: "ldff1.{sve_type[0] as {type[1]}}" + arguments: ["pg: {predicate[0]}", "base: *{type[1]}"] + return_type: "{sve_type[0] as {type[1]}}" + - FnCall: + - "crate::intrinsics::simd::simd_cast" + - - FnCall: ["{llvm_link}", [$pg, $base]] + - [Type: "{sve_type[0] as {type[1]}}", _] + + - name: svldff1s{size_literal[1]}_vnum_{type[0]} + attr: [*sve-unstable] + doc: Load {size[1]}-bit data and sign-extend, first-faulting + safety: + unsafe: + - pointer_offset_vnum: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + arguments: ["pg: {predicate[0]}", "base: *{type[1]}", "vnum: i64"] + return_type: "{sve_type[0]}" + types: + - [[i16, i32, i64, u16, u32, u64], i8] + - [[i32, i64, u32, u64], i16] + - [[i64, u64], i32] + assert_instr: ["ldff1s{size_literal[1]}"] + test: { load: 1 } + compose: + - FnCall: + - "svldff1s{size_literal[1]}_{type[0]}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: + [{ FnCall: ["svcnt{size_literal[0]}", []] }, isize] + - CastAs: [$vnum, isize] + + - name: svldff1u{size_literal[1]}_vnum_{type[0]} + attr: [*sve-unstable] + doc: Load {size[1]}-bit data and zero-extend, first-faulting + safety: + unsafe: + - pointer_offset_vnum: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + arguments: ["pg: {predicate[0]}", "base: *{type[1]}", "vnum: i64"] + return_type: "{sve_type[0]}" + types: + - [[i16, i32, i64, u16, u32, u64], u8] + - [[i32, i64, u32, u64], u16] + - [[i64, u64], u32] + assert_instr: ["ldff1{size_literal[1]}"] + test: { load: 1 } + compose: + - FnCall: + - "svldff1u{size_literal[1]}_{type[0]}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: + [{ FnCall: ["svcnt{size_literal[0]}", []] }, isize] + - CastAs: [$vnum, isize] + + - name: svldff1_gather_[{type[0]}]index[_{type[1]}] + attr: [*sve-unstable] + doc: Unextended load, first-faulting + safety: + unsafe: + - pointer_offset: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + arguments: + ["pg: {predicate[0]}", "base: *{type[1]}", "indices: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [[i32, u32], [f32, i32, u32]] + - [[i64, u64], [f64, i64, u64]] + assert_instr: ["ldff1{size_literal[0]}"] + test: { load: 1 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "ldff1.gather.{type_kind[0].su}xtw.index.{sve_type[1]}" + doubleword: + LLVMLink: + name: "ldff1.gather.index.{sve_type[1]}" + + - name: svldff1_gather_[{type[0]}]offset[_{type[1]}] + attr: [*sve-unstable] + doc: Unextended load, first-faulting + safety: + unsafe: + - pointer_offset: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + arguments: + ["pg: {predicate[0]}", "base: *{type[1]}", "offsets: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [[i32, u32], [f32, i32, u32]] + - [[i64, u64], [f64, i64, u64]] + assert_instr: ["ldff1{size_literal[0]}"] + test: { load: 1 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "ldff1.gather.{type_kind[0].su}xtw.{sve_type[1]}" + doubleword: + LLVMLink: + name: "ldff1.gather.{sve_type[1]}" + + - name: svldff1_gather[_{type[0]}base]_offset_{type[1]} + attr: [*sve-unstable] + doc: Unextended load, first-faulting + safety: + unsafe: + - pointer_offset: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + - no_provenance: bases + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "offset: i64"] + return_type: "{sve_type[1]}" + types: + - [u32, [f32, i32, u32]] + - [u64, [f64, i64, u64]] + assert_instr: ["ldff1{size_literal[0]}"] + test: { load: 1 } + compose: + - LLVMLink: + name: "ldff1.gather.scalar.offset.{sve_type[1]}.{sve_type[0]}" + + - name: svldff1_gather[_{type[0]}base]_{type[1]} + attr: [*sve-unstable] + doc: Unextended load, first-faulting + safety: + unsafe: + - pointer_offset: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + - no_provenance: bases + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [u32, [f32, i32, u32]] + - [u64, [f64, i64, u64]] + assert_instr: ["ldff1{size_literal[0]}"] + test: { load: 1 } + compose: + - FnCall: + - "svldff1_gather_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - 0 + + - name: svldff1_gather[_{type[0]}base]_index_{type[1]} + attr: [*sve-unstable] + doc: Unextended load, first-faulting + safety: + unsafe: + - pointer_offset: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + - no_provenance: bases + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "index: i64"] + return_type: "{sve_type[1]}" + types: + - [u32, [f32, i32, u32]] + - [u64, [f64, i64, u64]] + assert_instr: ["ldff1{size_literal[0]}"] + test: { load: 1 } + compose: + - FnCall: + - "svldff1_gather_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - MethodCall: [$index, unchecked_shl, ["{size_in_bytes_log2[0]}"]] + + - name: svldff1s{size_literal[2]}_gather_[{type[0]}]index_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and sign-extend, first-faulting + safety: + unsafe: + - pointer_offset: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + arguments: + ["pg: {predicate[0]}", "base: *{type[2]}", "indices: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [[i32, u32], [i32, u32], i16] + - [[i64, u64], [i64, u64], [i16, i32]] + assert_instr: ["ldff1s{size_literal[2]}"] + test: { load: 2 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "ldff1.gather.{type_kind[0].su}xtw.index.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + doubleword: + LLVMLink: + name: "ldff1.gather.index.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + - FnCall: + - crate::intrinsics::simd::simd_cast + - - FnCall: ["{llvm_link}", [$pg, $base, $indices]] + + - name: svldff1u{size_literal[2]}_gather_[{type[0]}]index_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and zero-extend, first-faulting + safety: + unsafe: + - pointer_offset: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + arguments: + ["pg: {predicate[0]}", "base: *{type[2]}", "indices: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [[i32, u32], [u32, i32], u16] + - [[i64, u64], [u64, i64], [u16, u32]] + assert_instr: ["ldff1{size_literal[2]}"] + test: { load: 2 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "ldff1.gather.{type_kind[0].su}xtw.index.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + doubleword: + LLVMLink: + name: "ldff1.gather.index.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + - FnCall: + - crate::intrinsics::simd::simd_cast + - - FnCall: ["{llvm_link}", [$pg, $base, $indices]] + - - Type: "{sve_type[1] as {type[2]}}" + - _ + + - name: svldff1s{size_literal[2]}_gather_[{type[0]}]offset_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and sign-extend, first-faulting + safety: + unsafe: + - pointer_offset: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + arguments: + ["pg: {predicate[0]}", "base: *{type[2]}", "offsets: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [[i32, u32], [i32, u32], [i8, i16]] + - [[i64, u64], [i64, u64], [i8, i16, i32]] + assert_instr: ["ldff1s{size_literal[2]}"] + test: { load: 2 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "ldff1.gather.{type_kind[0].su}xtw.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + doubleword: + LLVMLink: + name: "ldff1.gather.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + - FnCall: + - crate::intrinsics::simd::simd_cast + - - FnCall: ["{llvm_link}", [$pg, $base, $offsets]] + + - name: svldff1u{size_literal[2]}_gather_[{type[0]}]offset_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and zero-extend, first-faulting + safety: + unsafe: + - pointer_offset: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + arguments: + ["pg: {predicate[0]}", "base: *{type[2]}", "offsets: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [[i32, u32], [u32, i32], [u8, u16]] + - [[i64, u64], [u64, i64], [u8, u16, u32]] + assert_instr: ["ldff1{size_literal[2]}"] + test: { load: 2 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "ldff1.gather.{type_kind[0].su}xtw.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + doubleword: + LLVMLink: + name: "ldff1.gather.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + - FnCall: + - crate::intrinsics::simd::simd_cast + - - FnCall: ["{llvm_link}", [$pg, $base, $offsets]] + - - Type: "{sve_type[1] as {type[2]}}" + - _ + + - name: svldff1s{size_literal[2]}_gather[_{type[0]}base]_offset_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and sign-extend, first-faulting + safety: + unsafe: + - pointer_offset: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + - no_provenance: bases + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "offset: i64"] + return_type: "{sve_type[1]}" + types: + - [u32, [i32, u32], [i8, i16]] + - [u64, [i64, u64], [i8, i16, i32]] + assert_instr: ["ldff1s{size_literal[2]}"] + test: { load: 2 } + compose: + - LLVMLink: + name: "ldff1.gather.scalar.offset.{sve_type[1] as {type[2]}}.{sve_type[0]}" + return_type: "{sve_type[1] as {type[2]}}" + - FnCall: + - crate::intrinsics::simd::simd_cast + - - FnCall: ["{llvm_link}", [$pg, $bases, $offset]] + + - name: svldff1u{size_literal[2]}_gather[_{type[0]}base]_offset_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and zero-extend, first-faulting + safety: + unsafe: + - pointer_offset: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + - no_provenance: bases + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "offset: i64"] + return_type: "{sve_type[1]}" + types: + - [u32, [u32, i32], [u8, u16]] + - [u64, [u64, i64], [u8, u16, u32]] + assert_instr: ["ldff1{size_literal[2]}"] + test: { load: 2 } + compose: + - LLVMLink: + name: "ldff1.gather.scalar.offset.{sve_type[1] as {type[2]}}.{sve_type[0]}" + return_type: "{sve_type[1] as {type[2]}}" + - FnCall: + - crate::intrinsics::simd::simd_cast + - - FnCall: ["{llvm_link}", [$pg, $bases, $offset]] + - - Type: "{sve_type[1] as {type[2]}}" + - _ + + - name: svldff1s{size_literal[2]}_gather[_{type[0]}base]_{type[1]} + attr: [*sve-unstable] + safety: + unsafe: + - pointer_offset: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + - no_provenance: bases + doc: Load {size[2]}-bit data and sign-extend, first-faulting + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [u32, [i32, u32], [i8, i16]] + - [u64, [i64, u64], [i8, i16, i32]] + assert_instr: ["ldff1s{size_literal[2]}"] + test: { load: 2 } + compose: + - FnCall: + - "svldff1s{size_literal[2]}_gather_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - 0 + + - name: svldff1u{size_literal[2]}_gather[_{type[0]}base]_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and zero-extend, first-faulting + safety: + unsafe: + - pointer_offset: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + - no_provenance: bases + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [u32, [i32, u32], [u8, u16]] + - [u64, [i64, u64], [u8, u16, u32]] + assert_instr: ["ldff1{size_literal[2]}"] + test: { load: 2 } + compose: + - FnCall: + - "svldff1u{size_literal[2]}_gather_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - 0 + + - name: svldff1s{size_literal[2]}_gather[_{type[0]}base]_index_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and sign-extend, first-faulting + safety: + unsafe: + - pointer_offset: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + - no_provenance: bases + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "index: i64"] + return_type: "{sve_type[1]}" + types: + - [u32, [i32, u32], i16] + - [u64, [i64, u64], [i16, i32]] + assert_instr: ["ldff1s{size_literal[2]}"] + test: { load: 2 } + compose: + - FnCall: + - "svldff1s{size_literal[2]}_gather_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - MethodCall: [$index, unchecked_shl, ["{size_in_bytes_log2[2]}"]] + + - name: svldff1u{size_literal[2]}_gather[_{type[0]}base]_index_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and zero-extend, first-faulting + safety: + unsafe: + - pointer_offset: predicated_first_faulting + - dereference: predicated_first_faulting + - unpredictable_on_fault + - no_provenance: bases + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "index: i64"] + return_type: "{sve_type[1]}" + types: + - [u32, [i32, u32], u16] + - [u64, [i64, u64], [u16, u32]] + assert_instr: ["ldff1{size_literal[2]}"] + test: { load: 2 } + compose: + - FnCall: + - "svldff1u{size_literal[2]}_gather_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - MethodCall: [$index, unchecked_shl, ["{size_in_bytes_log2[2]}"]] + + - name: svrdffr_z + attr: [*sve-unstable] + doc: Read FFR, returning predicate of succesfully loaded elements + arguments: ["pg: svbool_t"] + return_type: svbool_t + assert_instr: [rdffr] + compose: + - LLVMLink: { name: "rdffr.z" } + + - name: svrdffr + attr: [*sve-unstable] + doc: Read FFR, returning predicate of succesfully loaded elements + arguments: [] + return_type: svbool_t + assert_instr: [rdffr] + compose: + - FnCall: [svrdffr_z, [FnCall: [svptrue_b8, []]]] + + - name: svsetffr + attr: [*sve-unstable] + doc: Initialize the first-fault register to all-true + arguments: [] + assert_instr: [setffr] + compose: + - LLVMLink: { name: "setffr" } + + - name: svwrffr + attr: [*sve-unstable] + doc: Write to the first-fault register + arguments: ["op: svbool_t"] + assert_instr: [wrffr] + compose: + - LLVMLink: { name: "wrffr" } + + - name: svqinc{size_literal[1]}[_n_{type[0]}] + attr: [*sve-unstable] + substitutions: + textual_size: + match_size: "{type[1]}" + default: word + byte: byte + halfword: halfword + doubleword: doubleword + doc: Saturating increment by number of {textual_size} elements + arguments: ["op: {type[0]}"] + static_defs: ["const IMM_FACTOR: i32"] + return_type: "{type[0]}" + types: + - [[i32, i64, u32, u64], [i8, i16, i32, i64]] + assert_instr: + - ["{type_kind[0].su}qinc{size_literal[1]}", "IMM_FACTOR = 1"] + compose: + - FnCall: + - "svqinc{size_literal[1]}_pat_n_{type[0]}" + - [$op] + - ["{{svpattern::SV_ALL}}", $IMM_FACTOR] + + - name: svqinc{size_literal[1]}_pat[_n_{type[0]}] + attr: [*sve-unstable] + substitutions: + textual_size: + match_size: "{type[1]}" + default: word + byte: byte + halfword: halfword + doubleword: doubleword + doc: Saturating increment by number of {textual_size} elements + arguments: ["op: {type[0]}"] + static_defs: ["const PATTERN: svpattern", "const IMM_FACTOR: i32"] + constraints: [{ variable: IMM_FACTOR, range: [1, 16] }] + return_type: "{type[0]}" + types: + - [[i32, i64, u32, u64], [i8, i16, i32, i64]] + assert_instr: + - - "{type_kind[0].su}qinc{size_literal[1]}" + - "PATTERN = {{svpattern::SV_ALL}}, IMM_FACTOR = 1" + compose: + - LLVMLink: + name: "{type_kind[0].su}qinc{size_literal[1]}.n{size[0]}" + arguments: ["op: {type[0]}", "pattern: svpattern", "imm_factor: i32"] + return_type: "{type[0]}" + - FnCall: ["{llvm_link}", [$op, $PATTERN, $IMM_FACTOR]] + + - name: svqinc{size_literal}[_{type}] + attr: [*sve-unstable] + substitutions: + textual_size: + match_size: "{type}" + default: word + halfword: halfword + doubleword: doubleword + doc: Saturating increment by number of {textual_size} elements + arguments: ["op: {sve_type}"] + static_defs: ["const IMM_FACTOR: i32"] + return_type: "{sve_type}" + types: [i16, u16, i32, u32, i64, u64] + assert_instr: [["{type_kind.su}qinc{size_literal}", "IMM_FACTOR = 1"]] + compose: + - FnCall: + - "svqinc{size_literal}_pat_{type}" + - [$op] + - ["{{svpattern::SV_ALL}}", $IMM_FACTOR] + + - name: svqinc{size_literal}_pat[_{type}] + attr: [*sve-unstable] + substitutions: + textual_size: + match_size: "{type}" + default: word + halfword: halfword + doubleword: doubleword + doc: Saturating increment by number of {textual_size} elements + arguments: ["op: {sve_type}"] + static_defs: ["const PATTERN: svpattern", "const IMM_FACTOR: i32"] + constraints: [{ variable: IMM_FACTOR, range: [1, 16] }] + return_type: "{sve_type}" + types: [i16, u16, i32, u32, i64, u64] + assert_instr: + - - "{type_kind.su}qinc{size_literal}" + - "PATTERN = {{svpattern::SV_ALL}}, IMM_FACTOR = 1" + compose: + - LLVMLink: + name: "{type_kind.su}qinc{size_literal}.{sve_type}" + arguments: ["op: {sve_type}", "pattern: svpattern", "imm_factor: i32"] + return_type: "{sve_type}" + - FnCall: ["{llvm_link}", [$op, $PATTERN, $IMM_FACTOR]] + + - name: svqdec{size_literal[1]}[_n_{type[0]}] + attr: [*sve-unstable] + substitutions: + textual_size: + match_size: "{type[1]}" + default: word + byte: byte + halfword: halfword + doubleword: doubleword + doc: Saturating decrement by number of {textual_size} elements + arguments: ["op: {type[0]}"] + static_defs: ["const IMM_FACTOR: i32"] + return_type: "{type[0]}" + types: + - [[i32, i64, u32, u64], [i8, i16, i32, i64]] + assert_instr: + - ["{type_kind[0].su}qdec{size_literal[1]}", "IMM_FACTOR = 1"] + compose: + - FnCall: + - "svqdec{size_literal[1]}_pat_n_{type[0]}" + - [$op] + - ["{{svpattern::SV_ALL}}", $IMM_FACTOR] + + - name: svqdec{size_literal[1]}_pat[_n_{type[0]}] + attr: [*sve-unstable] + substitutions: + textual_size: + match_size: "{type[1]}" + default: word + byte: byte + halfword: halfword + doubleword: doubleword + doc: Saturating decrement by number of {textual_size} elements + arguments: ["op: {type[0]}"] + static_defs: ["const PATTERN: svpattern", "const IMM_FACTOR: i32"] + constraints: [{ variable: IMM_FACTOR, range: [1, 16] }] + return_type: "{type[0]}" + types: + - [[i32, i64, u32, u64], [i8, i16, i32, i64]] + assert_instr: + - - "{type_kind[0].su}qdec{size_literal[1]}" + - "PATTERN = {{svpattern::SV_ALL}}, IMM_FACTOR = 1" + compose: + - LLVMLink: + name: "{type_kind[0].su}qdec{size_literal[1]}.n{size[0]}" + arguments: ["op: {type[0]}", "pattern: svpattern", "imm_factor: i32"] + return_type: "{type[0]}" + - FnCall: ["{llvm_link}", [$op, $PATTERN, $IMM_FACTOR]] + + - name: svqdec{size_literal}[_{type}] + attr: [*sve-unstable] + substitutions: + textual_size: + match_size: "{type}" + default: word + halfword: halfword + doubleword: doubleword + doc: Saturating decrement by number of {textual_size} elements + arguments: ["op: {sve_type}"] + static_defs: ["const IMM_FACTOR: i32"] + return_type: "{sve_type}" + types: [i16, u16, i32, u32, i64, u64] + assert_instr: [["{type_kind.su}qdec{size_literal}", "IMM_FACTOR = 1"]] + compose: + - FnCall: + - "svqdec{size_literal}_pat_{type}" + - [$op] + - ["{{svpattern::SV_ALL}}", $IMM_FACTOR] + + - name: svqdec{size_literal}_pat[_{type}] + attr: [*sve-unstable] + substitutions: + textual_size: + match_size: "{type}" + default: word + halfword: halfword + doubleword: doubleword + doc: Saturating decrement by number of {textual_size} elements + arguments: ["op: {sve_type}"] + static_defs: ["const PATTERN: svpattern", "const IMM_FACTOR: i32"] + constraints: [{ variable: IMM_FACTOR, range: [1, 16] }] + return_type: "{sve_type}" + types: [i16, u16, i32, u32, i64, u64] + assert_instr: + - - "{type_kind.su}qdec{size_literal}" + - "PATTERN = {{svpattern::SV_ALL}}, IMM_FACTOR = 1" + compose: + - LLVMLink: + name: "{type_kind.su}qdec{size_literal}.{sve_type}" + arguments: ["op: {sve_type}", "pattern: svpattern", "imm_factor: i32"] + return_type: "{sve_type}" + - FnCall: ["{llvm_link}", [$op, $PATTERN, $IMM_FACTOR]] + + - name: svst1[_{type}] + attr: [*sve-unstable] + doc: Non-truncating store + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: ["pg: {predicate}", "base: *mut {type}", "data: {sve_type}"] + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["st1{size_literal}"] + test: { store: 0 } + compose: + - LLVMLink: + name: "st1.{sve_type}" + arguments: + - "data: {sve_type}" + - "pg: {predicate}" + - "ptr: *mut {type}" + - FnCall: ["{llvm_link}", [$data, $pg, $base]] + + - name: svst1_scatter_[{type[0]}]index[_{type[1]}] + attr: [*sve-unstable] + doc: Non-truncating store + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: + - "pg: {predicate[0]}" + - "base: *mut {type[1]}" + - "indices: {sve_type[0]}" + - "data: {sve_type[1]}" + types: + - [[i32, u32], [f32, i32, u32]] + - [[i64, u64], [f64, i64, u64]] + assert_instr: ["st1{size_literal[0]}"] + test: { store: 1 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "st1.scatter.{type_kind[0].su}xtw.index.{sve_type[1]}" + arguments: + - "data: {sve_type[1]}" + - "pg: {predicate[0]}" + - "base: *mut {type[1]}" + - "indices: {sve_type[0]}" + doubleword: + LLVMLink: + name: "st1.scatter.index.{sve_type[1]}" + arguments: + - "data: {sve_type[1]}" + - "pg: {predicate[0]}" + - "base: *mut {type[1]}" + - "indices: {sve_type[0]}" + - FnCall: ["{llvm_link}", [$data, $pg, $base, $indices]] + + - name: svst1_scatter_[{type[0]}]offset[_{type[1]}] + attr: [*sve-unstable] + doc: Non-truncating store + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: + - "pg: {predicate[0]}" + - "base: *mut {type[1]}" + - "offsets: {sve_type[0]}" + - "data: {sve_type[1]}" + types: + - [[i32, u32], [f32, i32, u32]] + - [[i64, u64], [f64, i64, u64]] + assert_instr: ["st1{size_literal[0]}"] + test: { store: 1 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "st1.scatter.{type_kind[0].su}xtw.{sve_type[1]}" + arguments: + - "data: {sve_type[1]}" + - "pg: {predicate[0]}" + - "base: *mut {type[1]}" + - "offsets: {sve_type[0]}" + doubleword: + LLVMLink: + name: "st1.scatter.{sve_type[1]}" + arguments: + - "data: {sve_type[1]}" + - "pg: {predicate[0]}" + - "base: *mut {type[1]}" + - "offsets: {sve_type[0]}" + - FnCall: ["{llvm_link}", [$data, $pg, $base, $offsets]] + + - name: svst1_scatter[_{type[0]}base]_offset[_{type[1]}] + attr: [*sve-unstable] + doc: Non-truncating store + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + arguments: + - "pg: {predicate[0]}" + - "bases: {sve_type[0]}" + - "offset: i64" + - "data: {sve_type[1]}" + types: + - [u32, [f32, i32, u32]] + - [u64, [f64, i64, u64]] + assert_instr: ["st1{size_literal[0]}"] + test: { store: 1 } + compose: + - LLVMLink: + arguments: + - "data: {sve_type[1]}" + - "pg: {predicate[0]}" + - "bases: {sve_type[0]}" + - "offset: i64" + name: "st1.scatter.scalar.offset.{sve_type[1]}.{sve_type[0]}" + - FnCall: ["{llvm_link}", [$data, $pg, $bases, $offset]] + + - name: svst1_scatter[_{type[0]}base_{type[1]}] + attr: [*sve-unstable] + doc: Non-truncating store + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + arguments: + ["pg: {predicate[0]}", "bases: {sve_type[0]}", "data: {sve_type[1]}"] + types: + - [u32, [f32, i32, u32]] + - [u64, [f64, i64, u64]] + assert_instr: ["st1{size_literal[0]}"] + test: { store: 1 } + compose: + - FnCall: + - "svst1_scatter_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - 0 + - $data + + - name: svst1_scatter[_{type[0]}base]_index[_{type[1]}] + attr: [*sve-unstable] + doc: Non-truncating store + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + arguments: + - "pg: {predicate[0]}" + - "bases: {sve_type[0]}" + - "index: i64" + - "data: {sve_type[1]}" + types: + - [u32, [f32, i32, u32]] + - [u64, [f64, i64, u64]] + assert_instr: ["st1{size_literal[0]}"] + test: { store: 1 } + compose: + - FnCall: + - "svst1_scatter_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - MethodCall: [$index, unchecked_shl, ["{size_in_bytes_log2[0]}"]] + - $data + + - name: svst1{size_literal[2]}_scatter_[{type[0]}]index[_{type[1]}] + attr: [*sve-unstable] + doc: Truncate to {size[2]} bits and store + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: + - "pg: {predicate[0]}" + - "base: *mut {type[2]}" + - "indices: {sve_type[0]}" + - "data: {sve_type[1]}" + types: + - [[i32, u32], i32, i16] + - [[i32, u32], u32, u16] + - [[i64, u64], i64, [i16, i32]] + - [[i64, u64], u64, [u16, u32]] + assert_instr: ["st1{size_literal[2]}"] + test: { store: 2 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "st1.scatter.{type_kind[0].su}xtw.index.{sve_type[1] as {type[2]}}" + arguments: + - "data: {sve_type[1] as {type[2]}}" + - "pg: {predicate[0]}" + - "base: *mut {type[2]}" + - "indices: {sve_type[0]}" + doubleword: + LLVMLink: + name: "st1.scatter.index.{sve_type[1] as {type[2]}}" + arguments: + - "data: {sve_type[1] as {type[2]}}" + - "pg: {predicate[0]}" + - "base: *mut {type[2]}" + - "indices: {sve_type[0]}" + - FnCall: + - "{llvm_link}" + - [FnCall: ["crate::intrinsics::simd::simd_cast", [$data]], $pg, $base, $indices] + + - name: svst1{size_literal[2]}_scatter_[{type[0]}]offset[_{type[1]}] + attr: [*sve-unstable] + doc: Truncate to {size[2]} bits and store + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: + - "pg: {predicate[0]}" + - "base: *mut {type[2]}" + - "offsets: {sve_type[0]}" + - "data: {sve_type[1]}" + types: + - [[i32, u32], i32, [i8, i16]] + - [[i32, u32], u32, [u8, u16]] + - [[i64, u64], i64, [i8, i16, i32]] + - [[i64, u64], u64, [u8, u16, u32]] + assert_instr: ["st1{size_literal[2]}"] + test: { store: 2 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "st1.scatter.{type_kind[0].su}xtw.{sve_type[1] as {type[2]}}" + arguments: + - "data: {sve_type[1] as {type[2]}}" + - "pg: {predicate[0]}" + - "base: *mut {type[2]}" + - "offsets: {sve_type[0]}" + doubleword: + LLVMLink: + name: "st1.scatter.{sve_type[1] as {type[2]}}" + arguments: + - "data: {sve_type[1] as {type[2]}}" + - "pg: {predicate[0]}" + - "base: *mut {type[2]}" + - "offsets: {sve_type[0]}" + - FnCall: + - "{llvm_link}" + - [FnCall: ["crate::intrinsics::simd::simd_cast", [$data]], $pg, $base, $offsets] + + - name: svst1{size_literal[2]}_scatter[_{type[0]}base]_offset[_{type[1]}] + attr: [*sve-unstable] + doc: Truncate to {size[2]} bits and store + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + arguments: + - "pg: {predicate[0]}" + - "bases: {sve_type[0]}" + - "offset: i64" + - "data: {sve_type[1]}" + types: + - [u32, [i32, u32], [i8, i16]] + - [u64, [i64, u64], [i8, i16, i32]] + assert_instr: ["st1{size_literal[2]}"] + test: { store: 2 } + compose: + - LLVMLink: + name: "st1.scatter.scalar.offset.{sve_type[1] as {type[2]}}.{sve_type[0]}" + arguments: + - "data: {sve_type[1] as {type[2]}}" + - "pg: {predicate[0]}" + - "bases: {sve_type[0]}" + - "offset: i64" + - FnCall: + - "{llvm_link}" + - [FnCall: ["crate::intrinsics::simd::simd_cast", [$data]], $pg, $bases, $offset] + + - name: svst1{size_literal[2]}_scatter[_{type[0]}base_{type[1]}] + attr: [*sve-unstable] + doc: Truncate to {size[2]} bits and store + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + arguments: + ["pg: {predicate[0]}", "bases: {sve_type[0]}", "data: {sve_type[1]}"] + types: + - [u32, [i32, u32], [i8, i16]] + - [u64, [i64, u64], [i8, i16, i32]] + assert_instr: ["st1{size_literal[2]}"] + test: { store: 2 } + compose: + - FnCall: + - "svst1{size_literal[2]}_scatter_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - 0 + - $data + + - name: svst1{size_literal[2]}_scatter[_{type[0]}base]_index[_{type[1]}] + attr: [*sve-unstable] + doc: Truncate to {size[2]} bits and store + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + arguments: + - "pg: {predicate[0]}" + - "bases: {sve_type[0]}" + - "index: i64" + - "data: {sve_type[1]}" + types: + - [u32, [i32, u32], i16] + - [u64, [i64, u64], [i16, i32]] + assert_instr: ["st1{size_literal[2]}"] + test: { store: 2 } + compose: + - FnCall: + - "svst1{size_literal[2]}_scatter_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - MethodCall: [$index, unchecked_shl, ["{size_in_bytes_log2[2]}"]] + - $data + + - name: svstnt1[_{type}] + attr: [*sve-unstable] + doc: Non-truncating store, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - non_temporal + arguments: ["pg: {predicate}", "base: *mut {type}", "data: {sve_type}"] + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["stnt1{size_literal}"] + test: { store: 0 } + compose: + - LLVMLink: + name: "stnt1.{sve_type}" + arguments: + - "data: {sve_type}" + - "pg: {predicate}" + - "ptr: *mut {type}" + - FnCall: ["{llvm_link}", [$data, $pg, $base]] + + - name: svstnt1_vnum[_{type}] + attr: [*sve-unstable] + doc: Non-truncating store, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - non_temporal + arguments: + ["pg: {predicate}", "base: *mut {type}", "vnum: i64", "data: {sve_type}"] + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["stnt1{size_literal}"] + test: { store: 0 } + compose: + - FnCall: + - "svstnt1_{type}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: [{ FnCall: ["svcnt{size_literal}", []] }, isize] + - CastAs: [$vnum, isize] + - $data + + - name: svst1{size_literal[1]}[_{type[0]}] + attr: [*sve-unstable] + doc: Truncate to {size[1]} bits and store + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: + ["pg: {predicate[0]}", "base: *mut {type[1]}", "data: {sve_type[0]}"] + types: + - [[i16, i32, i64], i8] + - [[u16, u32, u64], u8] + - [[i32, i64], i16] + - [[u32, u64], u16] + - [i64, i32] + - [u64, u32] + assert_instr: ["st1{size_literal[1]}"] + test: { store: 1 } + compose: + - LLVMLink: + name: "st1.{sve_type[0] as {type[1]}}" + arguments: + - "data: {sve_type[0] as {type[1]}}" + - "pg: {predicate[0]}" + - "ptr: *mut {type[1]}" + - FnCall: + - "{llvm_link}" + - [FnCall: ["crate::intrinsics::simd::simd_cast", [$data]], $pg, $base] + + - name: svst1{size_literal[1]}_vnum[_{type[0]}] + attr: [*sve-unstable] + doc: Truncate to {size[1]} bits and store + safety: + unsafe: + - pointer_offset_vnum: predicated + - dereference: predicated + arguments: + - "pg: {predicate[0]}" + - "base: *mut {type[1]}" + - "vnum: i64" + - "data: {sve_type[0]}" + types: + - [[i16, i32, i64], i8] + - [[u16, u32, u64], u8] + - [[i32, i64], i16] + - [[u32, u64], u16] + - [i64, i32] + - [u64, u32] + assert_instr: ["st1{size_literal[1]}"] + test: { store: 1 } + compose: + - FnCall: + - "svst1{size_literal[1]}_{type[0]}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: + [{ FnCall: ["svcnt{size_literal[0]}", []] }, isize] + - CastAs: [$vnum, isize] + - $data + + - name: svst1_vnum[_{type}] + attr: [*sve-unstable] + doc: Non-truncating store + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: + ["pg: {predicate}", "base: *mut {type}", "vnum: i64", "data: {sve_type}"] + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["st1{size_literal}"] + test: { store: 0 } + compose: + - FnCall: + - "svst1_{type}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: [{ FnCall: ["svcnt{size_literal}", []] }, isize] + - CastAs: [$vnum, isize] + - $data + + - name: svst2[_{type}] + attr: [*sve-unstable] + doc: Store two vectors into two-element tuples + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: ["pg: {predicate}", "base: *mut {type}", "data: {sve_type_x2}"] + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["st2{size_literal}"] + test: { store: 0 } + compose: + - LLVMLink: + name: "st2.{sve_type}" + arguments: + - "data0: {sve_type}" + - "data1: {sve_type}" + - "pg: {predicate}" + - "ptr: *mut {type}" + - FnCall: + - "{llvm_link}" + - - FnCall: ["svget2_{type}", ["$data"], [0]] + - FnCall: ["svget2_{type}", ["$data"], [1]] + - "$pg" + - "$base" + + - name: svst2_vnum[_{type}] + attr: [*sve-unstable] + doc: Store two vectors into two-element tuples + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: + - "pg: {predicate}" + - "base: *mut {type}" + - "vnum: i64" + - "data: {sve_type_x2}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["st2{size_literal}"] + test: { store: 0 } + compose: + - FnCall: + - "svst2_{type}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: [{ FnCall: ["svcnt{size_literal}", []] }, isize] + - CastAs: [$vnum, isize] + - $data + + - name: svst3[_{type}] + attr: [*sve-unstable] + doc: Store three vectors into three-element tuples + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: ["pg: {predicate}", "base: *mut {type}", "data: {sve_type_x3}"] + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["st3{size_literal}"] + test: { store: 0 } + compose: + - LLVMLink: + name: "st3.{sve_type}" + arguments: + - "data0: {sve_type}" + - "data1: {sve_type}" + - "data2: {sve_type}" + - "pg: {predicate}" + - "ptr: *mut {type}" + - FnCall: + - "{llvm_link}" + - - FnCall: ["svget3_{type}", ["$data"], [0]] + - FnCall: ["svget3_{type}", ["$data"], [1]] + - FnCall: ["svget3_{type}", ["$data"], [2]] + - "$pg" + - "$base" + + - name: svst3_vnum[_{type}] + attr: [*sve-unstable] + doc: Store three vectors into three-element tuples + safety: + unsafe: + - pointer_offset_vnum: predicated + - dereference: predicated + arguments: + - "pg: {predicate}" + - "base: *mut {type}" + - "vnum: i64" + - "data: {sve_type_x3}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["st3{size_literal}"] + test: { store: 0 } + compose: + - FnCall: + - "svst3_{type}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: [{ FnCall: ["svcnt{size_literal}", []] }, isize] + - CastAs: [$vnum, isize] + - $data + + - name: svst4[_{type}] + attr: [*sve-unstable] + doc: Store four vectors into four-element tuples + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + arguments: ["pg: {predicate}", "base: *mut {type}", "data: {sve_type_x4}"] + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["st4{size_literal}"] + test: { store: 0 } + compose: + - LLVMLink: + name: "st4.{sve_type}" + arguments: + - "data0: {sve_type}" + - "data1: {sve_type}" + - "data2: {sve_type}" + - "data3: {sve_type}" + - "pg: {predicate}" + - "ptr: *mut {type}" + - FnCall: + - "{llvm_link}" + - - FnCall: ["svget4_{type}", ["$data"], [0]] + - FnCall: ["svget4_{type}", ["$data"], [1]] + - FnCall: ["svget4_{type}", ["$data"], [2]] + - FnCall: ["svget4_{type}", ["$data"], [3]] + - "$pg" + - "$base" + + - name: svst4_vnum[_{type}] + attr: [*sve-unstable] + doc: Store four vectors into four-element tuples + safety: + unsafe: + - pointer_offset_vnum: predicated + - dereference: predicated + arguments: + - "pg: {predicate}" + - "base: *mut {type}" + - "vnum: i64" + - "data: {sve_type_x4}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["st4{size_literal}"] + test: { store: 0 } + compose: + - FnCall: + - "svst4_{type}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: [{ FnCall: ["svcnt{size_literal}", []] }, isize] + - CastAs: [$vnum, isize] + - $data + + - name: svtbl[_{type[0]}] + attr: [*sve-unstable] + doc: Table lookup in single-vector table + arguments: ["data: {sve_type[0]}", "indices: {sve_type[1]}"] + return_type: "{sve_type[0]}" + assert_instr: [tbl] + types: + - [f32, u32] + - [f64, u64] + - [i8, u8] + - [i16, u16] + - [i32, u32] + - [i64, u64] + - [u8, u8] + - [u16, u16] + - [u32, u32] + - [u64, u64] + compose: + - LLVMLink: { name: "tbl.{sve_type[0]}" } + + - name: svwhilele_{type[1]}[_{type[0]}] + attr: [*sve-unstable] + doc: While incrementing scalar is less than or equal to + arguments: ["op1: {type[0]}", "op2: {type[0]}"] + return_type: "{sve_type[1]}" + types: [[[i32, i64, u32, u64], [b8, b16, b32, b64]]] + assert_instr: [{ default: whilele, unsigned: whilels }] + compose: + - MatchKind: + - "{type[0]}" + - default: { LLVMLink: { name: "whilele.{sve_type[1]}.{type[0]}" } } + unsigned: { LLVMLink: { name: "whilels.{sve_type[1]}.{type[0]}" } } + + - name: svwhilelt_{type[1]}[_{type[0]}] + attr: [*sve-unstable] + doc: While incrementing scalar is less than + arguments: ["op1: {type[0]}", "op2: {type[0]}"] + return_type: "{sve_type[1]}" + types: [[[i32, i64, u32, u64], [b8, b16, b32, b64]]] + assert_instr: [{ default: whilelt, unsigned: whilelo }] + compose: + - MatchKind: + - "{type[0]}" + - default: { LLVMLink: { name: "whilelt.{sve_type[1]}.{type[0]}" } } + unsigned: { LLVMLink: { name: "whilelo.{sve_type[1]}.{type[0]}" } } + + - name: svmax[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Maximum + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64, u8, u16, u32, u64, f32, f64] + zeroing_method: { select: op1 } + assert_instr: ["{type_kind}max"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.fsu}max.{sve_type}" } + + - name: svmaxnm[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Maximum number + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64] + zeroing_method: { select: op1 } + assert_instr: [fmaxnm] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.f}maxnm.{sve_type}" } + + - name: svpfalse[_b] + attr: [*sve-unstable] + doc: Set all predicate elements to false + arguments: [] + return_type: "svbool_t" + assert_instr: [pfalse] + compose: + - FnCall: + - "svdupq_n_b8" + - - false + - false + - false + - false + - false + - false + - false + - false + - false + - false + - false + - false + - false + - false + - false + - false + + - name: svptrue_pat_{type} + attr: [*sve-unstable] + doc: Set predicate elements to true + arguments: [] + static_defs: ["const PATTERN: svpattern"] + return_type: "{predicate}" + types: [b8, b16, b32, b64] + assert_instr: [[ptrue, "PATTERN = {{svpattern::SV_ALL}}"]] + compose: + - LLVMLink: + name: ptrue.{sve_type} + arguments: ["pattern: svpattern"] + - FnCall: ["{llvm_link}", [$PATTERN]] + + - name: svptrue_{type} + attr: [*sve-unstable] + doc: Set predicate elements to true + arguments: [] + return_type: "svbool_t" + types: [b8, b16, b32, b64] + assert_instr: [ptrue] + compose: + - FnCall: ["svptrue_pat_{type}", [], ["{{svpattern::SV_ALL}}"]] + + - name: svptest_any + attr: [*sve-unstable] + doc: Test whether any active element is true + arguments: ["pg: svbool_t", "op: svbool_t"] + return_type: "bool" + assert_instr: [ptest] + compose: + - LLVMLink: { name: "ptest.any.nxv16i1" } + + - name: svptest_first + attr: [*sve-unstable] + doc: Test whether first active element is true + arguments: ["pg: svbool_t", "op: svbool_t"] + return_type: "bool" + assert_instr: [ptest] + compose: + - LLVMLink: { name: "ptest.first.nxv16i1" } + + - name: svptest_last + attr: [*sve-unstable] + doc: Test whether last active element is true + arguments: ["pg: svbool_t", "op: svbool_t"] + return_type: "bool" + assert_instr: [ptest] + compose: + - LLVMLink: { name: "ptest.last.nxv16i1" } + + - name: svpfirst[_b] + attr: [*sve-unstable] + doc: Set the first active predicate element to true + arguments: ["pg: svbool_t", "op: svbool_t"] + return_type: "svbool_t" + assert_instr: [pfirst] + compose: + - LLVMLink: { name: "pfirst.nxv16i1" } + + - name: svpnext_{type} + attr: [*sve-unstable] + doc: Find next active predicate + arguments: ["pg: {predicate}", "op: {predicate}"] + return_type: "{predicate}" + types: [b8, b16, b32, b64] + assert_instr: [pnext] + compose: + - LLVMLink: { name: "pnext.{sve_type}" } + + - name: svbrkn[_b]_z + attr: [*sve-unstable] + doc: Propagate break to next partition + arguments: ["pg: svbool_t", "op1: svbool_t", "op2: svbool_t"] + return_type: "svbool_t" + assert_instr: [brkn] + compose: + - LLVMLink: { name: "brkn.z.nxv16i1" } + + - name: svbrkb[_b]_z + attr: [*sve-unstable] + doc: Break before first true condition + arguments: ["pg: svbool_t", "op: svbool_t"] + return_type: "svbool_t" + assert_instr: [brkb] + compose: + - LLVMLink: { name: "brkb.z.nxv16i1" } + + - name: svbrkb[_b]_m + attr: [*sve-unstable] + doc: Break before first true condition + arguments: ["inactive: svbool_t", "pg: svbool_t", "op: svbool_t"] + return_type: "svbool_t" + assert_instr: [brkb] + compose: + - LLVMLink: { name: "brkb.nxv16i1" } + + - name: svbrkpb[_b]_z + attr: [*sve-unstable] + doc: Break before first true condition, propagating from previous partition + arguments: ["pg: svbool_t", "op1: svbool_t", "op2: svbool_t"] + return_type: "svbool_t" + assert_instr: [brkpb] + compose: + - LLVMLink: { name: "brkpb.z.nxv16i1" } + + - name: svbrka[_b]_z + attr: [*sve-unstable] + doc: Break after first true condition + arguments: ["pg: svbool_t", "op: svbool_t"] + return_type: "svbool_t" + assert_instr: [brka] + compose: + - LLVMLink: { name: "brka.z.nxv16i1" } + + - name: svbrka[_b]_m + attr: [*sve-unstable] + doc: Break after first true condition + arguments: ["inactive: svbool_t", "pg: svbool_t", "op: svbool_t"] + return_type: "svbool_t" + assert_instr: [brka] + compose: + - LLVMLink: { name: "brka.nxv16i1" } + + - name: svbrkpa[_b]_z + attr: [*sve-unstable] + doc: Break after first true condition, propagating from previous partition + arguments: ["pg: svbool_t", "op1: svbool_t", "op2: svbool_t"] + return_type: "svbool_t" + assert_instr: [brkpa] + compose: + - LLVMLink: { name: "brkpa.z.nxv16i1" } + + - name: svsel[_b] + attr: [*sve-unstable] + doc: Conditionally select elements + arguments: ["pg: svbool_t", "op1: svbool_t", "op2: svbool_t"] + return_type: "svbool_t" + assert_instr: [sel] + compose: + - FnCall: ["simd_select", [$pg, $op1, $op2]] + + - name: svsel[_{type}] + attr: [*sve-unstable] + doc: Conditionally select elements + arguments: ["pg: svbool_t", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [sel] + compose: + - FnCall: + - "simd_select" + - - MatchSize: + - "{type}" + - { default: { MethodCall: [$pg, sve_into, []] }, byte: $pg } + - $op1 + - $op2 + - - MatchSize: + - "{type}" + - byte: svbool_t + halfword: svbool8_t + default: svbool4_t + doubleword: svbool2_t + - _ + + - name: svsub[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Subtract + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64, u8, u16, u32, u64, f32, f64] + assert_instr: ["{type_kind.f}sub"] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.f}sub.{sve_type}" } + + - name: svsubr[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Subtract reversed + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64, u8, u16, u32, u64, f32, f64] + assert_instr: ["{type_kind.f}subr"] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.f}subr.{sve_type}" } + + - name: svcntp_{predicate} + attr: [*sve-unstable] + doc: Count set predicate bits + arguments: ["pg: {predicate}", "op: {predicate}"] + types: [b8, b16, b32, b64] + return_type: u64 + assert_instr: [cntp] + compose: + - LLVMLink: { name: "cntp.{predicate}" } + + - name: svcompact[_{type}] + attr: [*sve-unstable] + doc: Shuffle active elements of vector to the right and fill with zero + arguments: ["pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i32, i64, u32, u64] + assert_instr: [compact] + compose: + - LLVMLink: { name: "compact.{sve_type}" } + + - name: svlasta[_{type}] + attr: [*sve-unstable] + doc: Extract element after last + arguments: ["pg: {predicate}", "op: {sve_type}"] + return_type: "{type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [lasta] + compose: + - LLVMLink: { name: "lasta.{sve_type}" } + + - name: svclasta[_{type}] + attr: [*sve-unstable] + doc: Conditionally extract element after last + arguments: ["pg: {predicate}", "fallback: {sve_type}", "data: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [clasta] + compose: + - LLVMLink: { name: "clasta.{sve_type}" } + + - name: svclasta[_n_{type}] + attr: [*sve-unstable] + doc: Conditionally extract element after last + arguments: ["pg: {predicate}", "fallback: {type}", "data: {sve_type}"] + return_type: "{type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [clasta] + compose: + - LLVMLink: { name: "clasta.n.{sve_type}" } + + - name: svlastb[_{type}] + attr: [*sve-unstable] + doc: Extract last element + arguments: ["pg: {predicate}", "op: {sve_type}"] + return_type: "{type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [lastb] + compose: + - LLVMLink: { name: "lastb.{sve_type}" } + + - name: svclastb[_{type}] + attr: [*sve-unstable] + doc: Conditionally extract last element + arguments: ["pg: {predicate}", "fallback: {sve_type}", "data: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [clastb] + compose: + - LLVMLink: { name: "clastb.{sve_type}" } + + - name: svclastb[_n_{type}] + attr: [*sve-unstable] + doc: Conditionally extract last element + arguments: ["pg: {predicate}", "fallback: {type}", "data: {sve_type}"] + return_type: "{type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [clastb] + compose: + - LLVMLink: { name: "clastb.n.{sve_type}" } + + - name: svqdecp[_{type}] + attr: [*sve-unstable] + doc: Saturating decrement by active element count + arguments: ["op: {sve_type}", "pg: {predicate}"] + return_type: "{sve_type}" + types: [i16, i32, i64, u16, u32, u64] + assert_instr: ["{type_kind.su}qdecp"] + compose: + - LLVMLink: { name: "{type_kind.su}qdecp.{sve_type}" } + + - name: svqdecp[_n_{type[0]}]_{type[1]} + attr: [*sve-unstable] + doc: Saturating decrement by active element count + arguments: ["op: {type[0]}", "pg: {sve_type[1]}"] + return_type: "{type[0]}" + types: [[[i32, i64, u32, u64], [b8, b16, b32, b64]]] + assert_instr: ["{type_kind[0].su}qdecp"] + compose: + - LLVMLink: { name: "{type_kind[0].su}qdecp.n{size[0]}.{sve_type[1]}" } + + - name: svqincp[_{type}] + attr: [*sve-unstable] + doc: Saturating increment by active element count + arguments: ["op: {sve_type}", "pg: {predicate}"] + return_type: "{sve_type}" + types: [i16, i32, i64, u16, u32, u64] + assert_instr: ["{type_kind.su}qincp"] + compose: + - LLVMLink: { name: "{type_kind.su}qincp.{sve_type}" } + + - name: svqincp[_n_{type[0]}]_{type[1]} + attr: [*sve-unstable] + doc: Saturating increment by active element count + arguments: ["op: {type[0]}", "pg: {sve_type[1]}"] + return_type: "{type[0]}" + types: [[[i32, i64, u32, u64], [b8, b16, b32, b64]]] + assert_instr: ["{type_kind[0].su}qincp"] + compose: + - LLVMLink: { name: "{type_kind[0].su}qincp.n{size[0]}.{sve_type[1]}" } + + - name: svtmad[_{type}] + attr: [*sve-unstable] + doc: Trigonometric multiply-add coefficient + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + static_defs: ["const IMM3: i32"] + constraints: [{ variable: IMM3, range: [0, 7] }] + return_type: "{sve_type}" + types: [f32, f64] + assert_instr: [[ftmad, "IMM3 = 0"]] + compose: + - LLVMLink: + name: "ftmad.x.{sve_type}" + arguments: ["op1: {sve_type}", "op2: {sve_type}", "imm3: i32"] + - FnCall: + - "{llvm_link}" + - [op1, op2, IMM3] + + - name: svtsmul[_{type[0]}] + attr: [*sve-unstable] + doc: Trigonometric starting value + arguments: ["op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [f32, u32] + - [f64, u64] + assert_instr: [ftsmul] + compose: + - LLVMLink: + name: "ftsmul.x.{sve_type[0]}" + + - name: svtssel[_{type[0]}] + attr: [*sve-unstable] + doc: Trigonometric select coefficient + arguments: ["op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [f32, u32] + - [f64, u64] + assert_instr: [ftssel] + compose: + - LLVMLink: + name: "ftssel.x.{sve_type[0]}" + + - name: svprf{size_literal} + attr: [*sve-unstable] + safety: + unsafe: + - pointer_offset: predicated + substitutions: + textual_size: + match_size: "{type}" + default: words + byte: bytes + halfword: halfwords + doubleword: doublewords + doc: Prefetch {textual_size} + arguments: ["pg: {predicate}", "base: *T"] + static_defs: ["const OP: svprfop", T] + types: [b8, b16, b32, b64] + assert_instr: + - ["prf{size_literal}", "OP = {{svprfop::SV_PLDL1KEEP}}, T = i64"] + test: { load: 0 } + compose: + - LLVMLink: + name: "prf.{sve_type}" + arguments: + ["pg: {predicate}", "base: *crate::ffi::c_void", "op: svprfop"] + - FnCall: + - "{llvm_link}" + - - $pg + - CastAs: [$base, "*const crate::ffi::c_void"] + - $OP + + - name: svprf{size_literal}_vnum + attr: [*sve-unstable] + safety: + unsafe: + - pointer_offset_vnum: predicated + substitutions: + textual_size: + match_size: "{type}" + default: words + byte: bytes + halfword: halfwords + doubleword: doublewords + doc: Prefetch {textual_size} + arguments: ["pg: {predicate}", "base: *T", "vnum: i64"] + static_defs: ["const OP: svprfop", T] + types: [b8, b16, b32, b64] + assert_instr: + - ["prf{size_literal}", "OP = {{svprfop::SV_PLDL1KEEP}}, T = i64"] + test: { load: 0 } + compose: + - FnCall: + - "svprf{size_literal}" + - - $pg + - MethodCall: + - $base + - offset + - - Multiply: + - CastAs: [{ FnCall: ["svcnt{size_literal}", []] }, isize] + - CastAs: [$vnum, isize] + - - $OP + - _ + + - name: svprf{size_literal[1]}_gather_[{type[0]}]{index_or_offset} + attr: [*sve-unstable] + safety: + unsafe: + - pointer_offset: predicated + substitutions: + index_or_offset: + { match_size: "{type[1]}", default: "index", byte: "offset" } + indices_or_offsets: + { match_size: "{type[1]}", default: "indices", byte: "offsets" } + textual_size: + match_size: "{type[1]}" + default: words + byte: bytes + halfword: halfwords + doubleword: doublewords + doc: Prefetch {textual_size} + types: + - [[i32, u32, i64, u64], [i8, i16, i32, i64]] + arguments: + ["pg: {predicate[0]}", "base: *T", "{indices_or_offsets}: {sve_type[0]}"] + static_defs: ["const OP: svprfop", T] + assert_instr: + [["prf{size_literal[1]}", "OP = {{svprfop::SV_PLDL1KEEP}}, T = i64"]] + test: { load: 0 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "prf{size_literal[1]}.gather.{type_kind[0].su}xtw.index.{sve_type[0]}" + arguments: + - "pg: {predicate[0]}" + - "base: *crate::ffi::c_void" + - "{indices_or_offsets}: {sve_type[0]}" + - "op: svprfop" + doubleword: + LLVMLink: + name: "prf{size_literal[1]}.gather.index.{sve_type[0]}" + arguments: + - "pg: {predicate[0]}" + - "base: *crate::ffi::c_void" + - "{indices_or_offsets}: {sve_type[0]}" + - "op: svprfop" + - FnCall: + - "{llvm_link}" + - - $pg + - CastAs: [$base, "*const crate::ffi::c_void"] + - "${indices_or_offsets}" + - $OP + + - name: svprf{size_literal[1]}_gather[_{type[0]}base] + attr: [*sve-unstable] + safety: + unsafe: + - pointer_offset: predicated + - no_provenance: bases + substitutions: + textual_size: + match_size: "{type[1]}" + default: words + byte: bytes + halfword: halfwords + doubleword: doublewords + doc: Prefetch {textual_size} + types: + - [[u32, u64], [i8, i16, i32, i64]] + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}"] + static_defs: ["const OP: svprfop"] + assert_instr: [["prf{size_literal[1]}", "OP = {{svprfop::SV_PLDL1KEEP}}"]] + test: { load: 0 } + compose: + - LLVMLink: + name: "prf{size_literal[1]}.gather.scalar.offset.{sve_type[0]}" + arguments: + - "pg: {predicate[0]}" + - "bases: {sve_type[0]}" + - "index: i64" + - "op: svprfop" + - FnCall: ["{llvm_link}", [$pg, $bases, 0, $OP]] + + - name: svprf{size_literal[1]}_gather[_{type[0]}base]_{index_or_offset} + attr: [*sve-unstable] + safety: + unsafe: + - pointer_offset: predicated + - no_provenance: bases + substitutions: + index_or_offset: + { match_size: "{type[1]}", default: "index", byte: "offset" } + textual_size: + match_size: "{type[1]}" + default: words + byte: bytes + halfword: halfwords + doubleword: doublewords + doc: Prefetch {textual_size} + types: + - [[u32, u64], [i8, i16, i32, i64]] + arguments: + ["pg: {predicate[0]}", "bases: {sve_type[0]}", "{index_or_offset}: i64"] + static_defs: ["const OP: svprfop"] + assert_instr: [["prfb", "OP = {{svprfop::SV_PLDL1KEEP}}"]] + test: { load: 0 } + compose: + - LLVMLink: + name: "prf{size_literal[1]}.gather.scalar.offset.{sve_type[0]}" + arguments: + - "pg: {predicate[0]}" + - "bases: {sve_type[0]}" + - "{index_or_offset}: i64" + - "op: svprfop" + - FnCall: + - "{llvm_link}" + - - $pg + - $bases + - MatchSize: + - "{type[1]}" + - byte: $offset + halfword: { MethodCall: [$index, unchecked_shl, [1]] } + default: { MethodCall: [$index, unchecked_shl, [2]] } + doubleword: { MethodCall: [$index, unchecked_shl, [3]] } + - $OP + + - name: svcvt_{type[0]}[_{type[1]}]{_mxz} + attr: [*sve-unstable] + doc: Floating-point convert + arguments: + ["inactive: {sve_type[0]}", "pg: {max_predicate}", "op: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [[f32, f64], [i32, u32, i64, u64]] + zeroing_method: { drop: inactive } + substitutions: + convert_from: { match_kind: "{type[1]}", default: s, unsigned: u } + assert_instr: ["{convert_from}cvtf"] + compose: + - LLVMLink: + name: "{convert_from}cvtf.{type[0]}{type[1]}" + + - name: svcvt_{type[0]}[_{type[1]}]{_mxz} + attr: [*sve-unstable] + doc: Floating-point convert + arguments: + ["inactive: {sve_type[0]}", "pg: {max_predicate}", "op: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [[i32, u32, i64, u64], [f32, f64]] + zeroing_method: { drop: inactive } + substitutions: + convert_to: { match_kind: "{type[0]}", default: s, unsigned: u } + assert_instr: ["fcvtz{convert_to}"] + compose: + - LLVMLink: { name: "fcvtz{convert_to}.{type[0]}{type[1]}" } + + - name: svcvt_{type[0]}[_{type[1]}]{_mxz} + attr: [*sve-unstable] + doc: Floating-point convert + arguments: + ["inactive: {sve_type[0]}", "pg: {max_predicate}", "op: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[f32, f64], [f64, f32]] + zeroing_method: { drop: inactive } + assert_instr: [fcvt] + compose: + - LLVMLink: { name: "fcvt.{type[0]}{type[1]}" } + + - name: svreinterpret_{type[0]}[_{type[1]}] + attr: [*sve-unstable] + doc: Reinterpret vector contents + arguments: ["op: {sve_type[1]}"] + return_type: "{sve_type[0]}" + assert_instr: [] + types: + - - [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + - [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + compose: + - FnCall: ["crate::intrinsics::transmute_unchecked", [$op], [], true] + + - name: svrinta[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Round to nearest, ties away from zero + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64] + assert_instr: [frinta] + zeroing_method: { drop: inactive } + compose: + - LLVMLink: { name: "frinta.{sve_type}" } + + - name: svrinti[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Round using current rounding mode (inexact) + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64] + assert_instr: [frinti] + zeroing_method: { drop: inactive } + compose: + - LLVMLink: { name: "frinti.{sve_type}" } + + - name: svrintm[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Round towards -∞ + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64] + assert_instr: [frintm] + zeroing_method: { drop: inactive } + compose: + - LLVMLink: { name: "frintm.{sve_type}" } + + - name: svrintn[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Round to nearest, ties to even + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64] + assert_instr: [frintn] + zeroing_method: { drop: inactive } + compose: + - LLVMLink: { name: "frintn.{sve_type}" } + + - name: svrintp[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Round towards +∞ + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64] + assert_instr: [frintp] + zeroing_method: { drop: inactive } + compose: + - LLVMLink: { name: "frintp.{sve_type}" } + + - name: svrintx[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Round using current rounding mode (exact) + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64] + assert_instr: [frintx] + zeroing_method: { drop: inactive } + compose: + - LLVMLink: { name: "frintx.{sve_type}" } + + - name: svrintz[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Round towards zero + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64] + assert_instr: [frintz] + zeroing_method: { drop: inactive } + compose: + - LLVMLink: { name: "frintz.{sve_type}" } + + - name: svabd[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Absolute difference + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f64, f32, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["{type_kind}abd"] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind}abd.{sve_type}" } + + - name: svabs[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Absolute value + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64] + assert_instr: ["{type_kind.f}abs"] + zeroing_method: { drop: inactive } + compose: + - LLVMLink: { name: "{type_kind.f}abs.{sve_type}" } + + - name: svand[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Bitwise AND + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + assert_instr: [and] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + n_variant_op: op2 + zeroing_method: { select: op1 } + compose: + - LLVMLink: { name: "and.{sve_type}" } + + - name: svandv[_{type}] + attr: [*sve-unstable] + doc: Bitwise AND reduction to scalar + arguments: ["pg: {predicate}", "op: {sve_type}"] + return_type: "{type}" + assert_instr: [andv] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + compose: + - LLVMLink: { name: "andv.{sve_type}" } + + - name: svand[_b]_z + attr: [*sve-unstable] + doc: Bitwise AND + arguments: ["pg: svbool_t", "op1: svbool_t", "op2: svbool_t"] + return_type: svbool_t + assert_instr: [and] + compose: + - LLVMLink: { name: "and.z.nvx16i1" } + + - name: svmov[_b]_z + attr: [*sve-unstable] + doc: Move + arguments: ["pg: svbool_t", "op: svbool_t"] + return_type: svbool_t + assert_instr: [mov] + compose: + - FnCall: ["svand_b_z", [$pg, $op, $op]] + + - name: svbic[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Bitwise clear + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + assert_instr: [bic] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + n_variant_op: op2 + zeroing_method: { select: op1 } + compose: + - LLVMLink: { name: "bic.{sve_type}" } + + - name: svbic[_b]_z + attr: [*sve-unstable] + doc: Bitwise clear + arguments: ["pg: svbool_t", "op1: svbool_t", "op2: svbool_t"] + return_type: svbool_t + assert_instr: [bic] + compose: + - LLVMLink: { name: "bic.z.nvx16i1" } + + - name: sveor[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Bitwise exclusive OR + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + assert_instr: [eor] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + n_variant_op: op2 + zeroing_method: { select: op1 } + compose: + - LLVMLink: { name: "eor.{sve_type}" } + + - name: sveorv[_{type}] + attr: [*sve-unstable] + doc: Bitwise exclusive OR reduction to scalar + arguments: ["pg: {predicate}", "op: {sve_type}"] + return_type: "{type}" + assert_instr: [eorv] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + compose: + - LLVMLink: { name: "eorv.{sve_type}" } + + - name: sveor[_b]_z + attr: [*sve-unstable] + doc: Bitwise exclusive OR + arguments: ["pg: svbool_t", "op1: svbool_t", "op2: svbool_t"] + return_type: svbool_t + assert_instr: [eor] + compose: + - LLVMLink: { name: "eor.z.nvx16i1" } + + - name: svnot[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Bitwise invert + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + assert_instr: [not] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + zeroing_method: { drop: inactive } + compose: + - LLVMLink: { name: "not.{sve_type}" } + + - name: svnot[_b]_z + attr: [*sve-unstable] + doc: Bitwise invert + arguments: ["pg: svbool_t", "op: svbool_t"] + return_type: svbool_t + assert_instr: [not] + compose: + - FnCall: ["sveor_b_z", [$pg, $op, $pg]] + + - name: svcnot[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Logically invert boolean condition + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + assert_instr: [cnot] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + zeroing_method: { drop: inactive } + compose: + - LLVMLink: { name: "cnot.{sve_type}" } + + - name: svnand[_b]_z + attr: [*sve-unstable] + doc: Bitwise NAND + arguments: ["pg: svbool_t", "op1: svbool_t", "op2: svbool_t"] + return_type: svbool_t + assert_instr: [nand] + compose: + - LLVMLink: { name: "nand.z.nxv16i1" } + + - name: svnor[_b]_z + attr: [*sve-unstable] + doc: Bitwise NOR + arguments: ["pg: svbool_t", "op1: svbool_t", "op2: svbool_t"] + return_type: svbool_t + assert_instr: [nor] + compose: + - LLVMLink: { name: "nor.z.nxv16i1" } + + - name: svorr[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Bitwise inclusive OR + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + assert_instr: [orr] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + n_variant_op: op2 + zeroing_method: { select: op1 } + compose: + - LLVMLink: { name: "orr.{sve_type}" } + + - name: svorv[_{type}] + attr: [*sve-unstable] + doc: Bitwise inclusive OR reduction to scalar + arguments: ["pg: {predicate}", "op: {sve_type}"] + return_type: "{type}" + assert_instr: [orv] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + compose: + - LLVMLink: { name: "orv.{sve_type}" } + + - name: svorr[_b]_z + attr: [*sve-unstable] + doc: Bitwise inclusive OR + arguments: ["pg: svbool_t", "op1: svbool_t", "op2: svbool_t"] + return_type: svbool_t + assert_instr: [orr] + compose: + - LLVMLink: { name: "orr.z.nvx16i1" } + + - name: svorn[_b]_z + attr: [*sve-unstable] + doc: Bitwise inclusive OR, inverting second argument + arguments: ["pg: svbool_t", "op1: svbool_t", "op2: svbool_t"] + return_type: svbool_t + assert_instr: [orn] + compose: + - LLVMLink: { name: "orn.z.nvx16i1" } + + - name: svlsl[{_n}_{type[0]}]{_mxz} + attr: [*sve-unstable] + doc: Logical shift left + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [[i8, u8], u8] + - [[i16, u16], u16] + - [[i32, u32], u32] + - [[i64, u64], u64] + assert_instr: [lsl] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "lsl.{sve_type[0]}" } + + - name: svlsl_wide[{_n}_{type[0]}]{_mxz} + attr: [*sve-unstable] + doc: Logical shift left + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [[i8, i16, i32, u8, u16, u32], u64] + assert_instr: [lsl] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "lsl.wide.{sve_type[0]}" } + + - name: svasr[{_n}_{type[0]}]{_mxz} + attr: [*sve-unstable] + doc: Arithmetic shift right + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i8, u8] + - [i16, u16] + - [i32, u32] + - [i64, u64] + assert_instr: [asr] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "asr.{sve_type[0]}" } + + - name: svasr_wide[{_n}_{type[0]}]{_mxz} + attr: [*sve-unstable] + doc: Arithmetic shift right + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [[i8, i16, i32], u64] + assert_instr: [asr] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "asr.wide.{sve_type[0]}" } + + - name: svasrd[_n_{type}]{_mxz} + attr: [*sve-unstable] + doc: Arithmetic shift right for divide by immediate + arguments: ["pg: {predicate}", "op1: {sve_type}"] + return_type: "{sve_type}" + static_defs: ["const IMM2: i32"] + constraints: [{ variable: IMM2, range: ["1", "{size}"] }] + types: [i8, i16, i32, i64] + assert_instr: [[asrd, "IMM2 = 1"]] + zeroing_method: { select: op1 } + compose: + - LLVMLink: + name: "asrd.{sve_type}" + arguments: ["pg: {predicate}", "op1: {sve_type}", "imm2: i32"] + - FnCall: ["{llvm_link}", [$pg, $op1, $IMM2]] + + - name: svlsr[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Logical shift right + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [u8, u16, u32, u64] + assert_instr: [lsr] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "lsr.{sve_type}" } + + - name: svlsr_wide[{_n}_{type[0]}]{_mxz} + attr: [*sve-unstable] + doc: Logical shift right + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [[u8, u16, u32], u64] + assert_instr: [lsr] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "lsr.wide.{sve_type[0]}" } + + - name: svadda[_{type}] + attr: [*sve-unstable] + doc: Add reduction (strictly-ordered) + arguments: ["pg: {predicate}", "initial: {type}", "op: {sve_type}"] + return_type: "{type}" + assert_instr: [fadda] + types: [f32, f64] + compose: + - LLVMLink: { name: "fadda.{sve_type}" } + + - name: svaddv[_{type}] + attr: [*sve-unstable] + doc: Add reduction + arguments: ["pg: {predicate}", "op: {sve_type}"] + return_type: "{type}" + types: [f32, f64, i64, u64] + assert_instr: [{ float: faddv, default: uaddv }] + compose: + - LLVMLink: { name: "{type_kind.fsu}addv.{sve_type}" } + + - name: svaddv[_{type[0]}] + attr: [*sve-unstable] + doc: Add reduction + arguments: ["pg: {predicate[0]}", "op: {sve_type[0]}"] + return_type: "{type[1]}" + types: + - [[i8, i16, i32], i64] + - [[u8, u16, u32], u64] + assert_instr: ["{type_kind[0].su}addv"] + compose: + - LLVMLink: { name: "{type_kind[0].su}addv.{sve_type[0]}" } + + - name: svmaxv[_{type}] + attr: [*sve-unstable] + doc: Maximum reduction to scalar + arguments: ["pg: {predicate}", "op: {sve_type}"] + return_type: "{type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["{type_kind.fsu}maxv"] + compose: + - LLVMLink: { name: "{type_kind.fsu}maxv.{sve_type}" } + + - name: svmaxnmv[_{type}] + attr: [*sve-unstable] + doc: Maximum number reduction to scalar + arguments: ["pg: {predicate}", "op: {sve_type}"] + return_type: "{type}" + types: [f32, f64] + assert_instr: [fmaxnmv] + compose: + - LLVMLink: { name: "fmaxnmv.{sve_type}" } + + - name: svminv[_{type}] + attr: [*sve-unstable] + doc: Minimum reduction to scalar + arguments: ["pg: {predicate}", "op: {sve_type}"] + return_type: "{type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["{type_kind.fsu}minv"] + compose: + - LLVMLink: { name: "{type_kind.fsu}minv.{sve_type}" } + + - name: svminnmv[_{type}] + attr: [*sve-unstable] + doc: Minimum number reduction to scalar + arguments: ["pg: {predicate}", "op: {sve_type}"] + return_type: "{type}" + types: [f32, f64] + assert_instr: [fminnmv] + compose: + - LLVMLink: { name: "fminnmv.{sve_type}" } + + - name: svmul[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Multiply + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + assert_instr: ["{type_kind.f}mul"] + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.f}mul.{sve_type}" } + + - name: svmulh[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Multiply, returning high-half + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + assert_instr: ["{type_kind.su}mulh"] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.su}mulh.{sve_type}" } + + - name: svmulx[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Multiply extended (∞×0=2) + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + assert_instr: ["fmulx"] + types: [f32, f64] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "fmulx.{sve_type}" } + + - name: svrecpe[_{type}] + attr: [*sve-unstable] + doc: Reciprocal estimate + arguments: ["op: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64] + assert_instr: [frecpe] + compose: + - LLVMLink: { name: "frecpe.x.{sve_type}" } + + - name: svrecps[_{type}] + attr: [*sve-unstable] + doc: Reciprocal step + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64] + assert_instr: [frecps] + compose: + - LLVMLink: { name: "frecps.x.{sve_type}" } + + - name: svrecpx[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Reciprocal exponent + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64] + assert_instr: [frecpx] + zeroing_method: { drop: inactive } + compose: + - LLVMLink: { name: "frecpx.x.{sve_type}" } + + - name: svrsqrte[_{type}] + attr: [*sve-unstable] + doc: Reciprocal square root estimate + arguments: ["op: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64] + assert_instr: [frsqrte] + compose: + - LLVMLink: { name: "frsqrte.x.{sve_type}" } + + - name: svrsqrts[_{type}] + attr: [*sve-unstable] + doc: Reciprocal square root step + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64] + assert_instr: [frsqrts] + compose: + - LLVMLink: { name: "frsqrts.x.{sve_type}" } + + - name: svmad[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Multiply-add, multiplicand first + arguments: + - "pg: {predicate}" + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + zeroing_method: { select: op1 } + n_variant_op: op3 + assert_instr: ["{type_kind.f}mad"] + compose: + - LLVMLink: { name: "{type_kind.f}mad.{sve_type}" } + + - name: svmla[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Multiply-add, addend first + arguments: + - "pg: {predicate}" + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + zeroing_method: { select: op1 } + n_variant_op: op3 + assert_instr: ["{type_kind.f}mla"] + compose: + - LLVMLink: { name: "{type_kind.f}mla.{sve_type}" } + + - name: svmla_lane[_{type}] + attr: [*sve-unstable] + doc: Multiply-add, addend first + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type}" }] + types: [f32, f64] + assert_instr: [[fmla, "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "fmla.lane.{sve_type}" + arguments: + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + - "IMM_INDEX: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]] + + - name: svmls[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Multiply-subtract, minuend first + arguments: + - "pg: {predicate}" + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + zeroing_method: { select: op1 } + n_variant_op: op3 + assert_instr: ["{type_kind.f}mls"] + compose: + - LLVMLink: { name: "{type_kind.f}mls.{sve_type}" } + + - name: svmls_lane[_{type}] + attr: [*sve-unstable] + doc: Multiply-subtract, minuend first + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type}" }] + types: [f32, f64] + assert_instr: [[fmls, "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "fmls.lane.{sve_type}" + arguments: + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + - "IMM_INDEX: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]] + + - name: svmsb[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Multiply-subtract, multiplicand first + arguments: + - "pg: {predicate}" + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + zeroing_method: { select: op1 } + n_variant_op: op3 + assert_instr: ["{type_kind.f}msb"] + compose: + - LLVMLink: { name: "{type_kind.f}msb.{sve_type}" } + + - name: svnmad[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Negated multiply-add, multiplicand first + arguments: + - "pg: {predicate}" + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + return_type: "{sve_type}" + types: [f32, f64] + zeroing_method: { select: op1 } + n_variant_op: op3 + assert_instr: [fnmad] + compose: + - LLVMLink: { name: "fnmad.{sve_type}" } + + - name: svnmla[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Negated multiply-add, addend first + arguments: + - "pg: {predicate}" + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + return_type: "{sve_type}" + types: [f32, f64] + zeroing_method: { select: op1 } + n_variant_op: op3 + assert_instr: [fnmla] + compose: + - LLVMLink: { name: "fnmla.{sve_type}" } + + - name: svnmls[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Negated multiply-subtract, minuend first + arguments: + - "pg: {predicate}" + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + return_type: "{sve_type}" + types: [f32, f64] + zeroing_method: { select: op1 } + n_variant_op: op3 + assert_instr: [fnmls] + compose: + - LLVMLink: { name: "fnmls.{sve_type}" } + + - name: svnmsb[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Negated multiply-subtract, multiplicand first + arguments: + - "pg: {predicate}" + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + return_type: "{sve_type}" + types: [f32, f64] + zeroing_method: { select: op1 } + n_variant_op: op3 + assert_instr: [fnmsb] + compose: + - LLVMLink: { name: "fnmsb.{sve_type}" } + + - name: svneg[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Negate + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64] + assert_instr: ["{type_kind.f}neg"] + zeroing_method: { drop: inactive } + compose: + - LLVMLink: { name: "{type_kind.f}neg.{sve_type}" } + + - name: svqadd[{_n}_{type}] + attr: [*sve-unstable] + doc: Saturating add + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["{type_kind.su}qadd"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.su}qadd.x.{sve_type}" } + + - name: svadr{size_literal[2]}[_{type[0]}base]_[{type[1]}]{index_or_offset} + attr: [*sve-unstable] + substitutions: + index_or_offset: { match_size: "{type[2]}", default: index, byte: offset } + indices_or_offsets: + { match_size: "{type[2]}", default: indices, byte: offsets } + doc: Compute vector addresses for {size[2]}-bit data + arguments: ["bases: {sve_type[0]}", "{indices_or_offsets}: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [u32, [i32, u32], [i8, i16, i32, i64]] + - [u64, [i64, u64], [i8, i16, i32, i64]] + assert_instr: [adr] + compose: + - LLVMLink: { name: "adr{size_literal[2]}.{sve_type[0]}" } + + - name: svdot[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Dot product + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i32, i8] + - [i64, i16] + - [u32, u8] + - [u64, u16] + assert_instr: ["{type_kind[0].su}dot"] + n_variant_op: op3 + compose: + - LLVMLink: { name: "{type_kind[0].su}dot.{sve_type[0]}" } + + - name: svdot_lane[_{type[0]}] + attr: [*sve-unstable] + doc: Dot product + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[0]}" }] + types: + - [i32, i8] + - [i64, i16] + - [u32, u8] + - [u64, u16] + assert_instr: [["{type_kind[0].su}dot", "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "{type_kind[0].su}dot.lane.{sve_type[0]}" + arguments: + - "op1: {sve_type[0]}" + - "op2: {sve_type[1]}" + - "op3: {sve_type[1]}" + - "imm_index: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]] + + - name: svusdot[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Dot product (unsigned × signed) + target_features: [i8mm] + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[2]}"] + return_type: "{sve_type[0]}" + types: [[i32, u8, i8]] + assert_instr: [usdot] + n_variant_op: op3 + compose: + - LLVMLink: { name: "usdot.{sve_type[0]}" } + + - name: svusdot_lane[_{type[0]}] + attr: [*sve-unstable] + doc: Dot product (unsigned × signed) + target_features: [i8mm] + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[2]}"] + return_type: "{sve_type[0]}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[0]}" }] + types: [[i32, u8, i8]] + assert_instr: [[usdot, "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "usdot.lane.{sve_type[0]}" + arguments: + - "op1: {sve_type[0]}" + - "op2: {sve_type[1]}" + - "op3: {sve_type[2]}" + - "imm_index: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]] + + - name: svsudot[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Dot product (signed × unsigned) + target_features: [i8mm] + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[2]}"] + return_type: "{sve_type[0]}" + types: [[i32, i8, u8]] + assert_instr: [usdot] + n_variant_op: op3 + compose: + - FnCall: ["svusdot_{type[0]}", [$op1, $op3, $op2]] + + - name: svsudot_lane[_{type[0]}] + attr: [*sve-unstable] + doc: Dot product (signed × unsigned) + target_features: [i8mm] + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[2]}"] + return_type: "{sve_type[0]}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[0]}" }] + types: [[i32, i8, u8]] + assert_instr: [[sudot, "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "sudot.lane.{sve_type[0]}" + arguments: + - "op1: {sve_type[0]}" + - "op2: {sve_type[1]}" + - "op3: {sve_type[2]}" + - "imm_index: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]] + + - name: svdiv[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Divide + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i32, i64, u32, u64] + assert_instr: ["{type_kind.fsu}div"] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.fsu}div.{sve_type}" } + + - name: svdivr[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Divide reversed + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i32, i64, u32, u64] + assert_instr: ["{type_kind.fsu}divr"] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.fsu}divr.{sve_type}" } + + - name: svexpa[_{type[0]}] + attr: [*sve-unstable] + doc: Floating-point exponential accelerator + arguments: ["op: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[f32, u32], [f64, u64]] + assert_instr: [fexpa] + compose: + - LLVMLink: { name: "fexpa.x.{sve_type[0]} " } + + - name: svscale[{_n}_{type[0]}]{_mxz} + attr: [*sve-unstable] + doc: Adjust exponent + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[f32, i32], [f64, i64]] + assert_instr: [fscale] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "fscale.{sve_type[0]}" } + + - name: svmmla[_{type}] + attr: [*sve-unstable] + doc: Matrix multiply-accumulate + target_features: [f32mm] + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + types: [f32] + assert_instr: [fmmla] + compose: + - LLVMLink: { name: "fmmla.{sve_type}" } + + - name: svmmla[_{type}] + attr: [*sve-unstable] + doc: Matrix multiply-accumulate + target_features: [f64mm] + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + types: [f64] + assert_instr: [fmmla] + compose: + - LLVMLink: { name: "fmmla.{sve_type}" } + + - name: svmmla[_{type[0]}] + attr: [*sve-unstable] + doc: Matrix multiply-accumulate + target_features: [i8mm] + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[i32, i8], [u32, u8]] + assert_instr: ["{type_kind[0].su}mmla"] + compose: + - LLVMLink: { name: "{type_kind[0].su}mmla.{sve_type[0]}" } + + - name: svusmmla[_{type[0]}] + attr: [*sve-unstable] + doc: Matrix multiply-accumulate (unsigned × signed) + target_features: [i8mm] + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[2]}"] + return_type: "{sve_type[0]}" + types: [[i32, u8, i8]] + assert_instr: [usmmla] + compose: + - LLVMLink: { name: "usmmla.{sve_type[0]}" } + + - name: svmin[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Minimum + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + zeroing_method: { select: op1 } + assert_instr: ["{type_kind.fsu}min"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.fsu}min.{sve_type}" } + + - name: svminnm[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Minimum number + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64] + zeroing_method: { select: op1 } + assert_instr: [fminnm] + n_variant_op: op2 + compose: + - LLVMLink: { name: "fminnm.{sve_type}" } diff --git a/library/stdarch/crates/stdarch-gen-arm/spec/sve2/aarch64.spec.yml b/library/stdarch/crates/stdarch-gen-arm/spec/sve2/aarch64.spec.yml new file mode 100644 index 000000000000..6365bea21b51 --- /dev/null +++ b/library/stdarch/crates/stdarch-gen-arm/spec/sve2/aarch64.spec.yml @@ -0,0 +1,3196 @@ +arch_cfgs: + - arch_name: aarch64 + target_feature: [sve, sve2] + llvm_prefix: llvm.aarch64.sve + +auto_llvm_sign_conversion: true +generate_load_store_tests: true + +# `#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")]` +sve-unstable: &sve-unstable + FnCall: [unstable, ['feature = "stdarch_aarch64_sve"', 'issue= "145052"']] + +intrinsics: + - name: svbext[{_n}_{type}] + attr: [*sve-unstable] + target_features: [sve2-bitperm] + doc: Gather lower bits from positions selected by bitmask + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [u8, u16, u32, u64] + assert_instr: [bext] + n_variant_op: op2 + compose: + - LLVMLink: { name: "bext.x.{sve_type}" } + + - name: svbgrp[{_n}_{type}] + attr: [*sve-unstable] + target_features: [sve2-bitperm] + doc: Group bits to right or left as selected by bitmask + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [u8, u16, u32, u64] + assert_instr: [bgrp] + n_variant_op: op2 + compose: + - LLVMLink: { name: "bgrp.x.{sve_type}" } + + - name: svbdep[{_n}_{type}] + attr: [*sve-unstable] + target_features: [sve2-bitperm] + doc: Scatter lower bits into positions selected by bitmask + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [u8, u16, u32, u64] + assert_instr: [bdep] + n_variant_op: op2 + compose: + - LLVMLink: { name: "bdep.x.{sve_type}" } + + - name: svhistcnt[_{type[0]}]_z + attr: [*sve-unstable] + doc: Count matching elements + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: [[i32, u32], [i64, u64], [u32, u32], [u64, u64]] + assert_instr: [histcnt] + compose: + - LLVMLink: { name: "histcnt.{sve_type[0]}" } + + - name: svhistseg[_{type[0]}] + attr: [*sve-unstable] + doc: Count matching elements in 128-bit segments + arguments: ["op1: {sve_type[0]}", "op2: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: [[i8, u8], [u8, u8]] + assert_instr: [histseg] + compose: + - LLVMLink: { name: "histseg.{sve_type[0]}" } + + - name: svmatch[_{type}] + attr: [*sve-unstable] + doc: Detect any matching elements + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{predicate}" + types: [i8, i16, u8, u16] + assert_instr: [match] + compose: + - LLVMLink: { name: "match.{sve_type}" } + + - name: svnmatch[_{type}] + attr: [*sve-unstable] + doc: Detect no matching elements + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{predicate}" + types: [i8, i16, u8, u16] + assert_instr: [nmatch] + compose: + - LLVMLink: { name: "nmatch.{sve_type}" } + + - name: svhadd[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Halving add + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64, u8, u16, u32, u64] + zeroing_method: { select: op1 } + assert_instr: ["{type_kind.su}hadd"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.su}hadd.{sve_type}" } + + - name: svrhadd[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Rounding halving add + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64, u8, u16, u32, u64] + zeroing_method: { select: op1 } + assert_instr: ["{type_kind.su}rhadd"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.su}rhadd.{sve_type}" } + + - name: svaddhnb[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Add narrow high part (bottom) + types: + [[i16, i8], [i32, i16], [i64, i32], [u16, u8], [u32, u16], [u64, u32]] + return_type: "{sve_type[1]}" + arguments: ["op1: {sve_type[0]}", "op2: {sve_type[0]}"] + assert_instr: [addhnb] + n_variant_op: op2 + compose: + - LLVMLink: { name: "addhnb.{sve_type[0]}" } + + - name: svaddhnt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Add narrow high part (top) + types: + [[i16, i8], [i32, i16], [i64, i32], [u16, u8], [u32, u16], [u64, u32]] + return_type: "{sve_type[1]}" + arguments: + ["even: {sve_type[1]}", "op1: {sve_type[0]}", "op2: {sve_type[0]}"] + assert_instr: [addhnt] + n_variant_op: op2 + compose: + - LLVMLink: { name: "addhnt.{sve_type[0]}" } + + - name: svraddhnb[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Rounding add narrow high part (bottom) + types: + [[i16, i8], [i32, i16], [i64, i32], [u16, u8], [u32, u16], [u64, u32]] + return_type: "{sve_type[1]}" + arguments: ["op1: {sve_type[0]}", "op2: {sve_type[0]}"] + assert_instr: [raddhnb] + n_variant_op: op2 + compose: + - LLVMLink: { name: "raddhnb.{sve_type[0]}" } + + - name: svraddhnt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Rounding add narrow high part (top) + types: + [[i16, i8], [i32, i16], [i64, i32], [u16, u8], [u32, u16], [u64, u32]] + return_type: "{sve_type[1]}" + arguments: + ["even: {sve_type[1]}", "op1: {sve_type[0]}", "op2: {sve_type[0]}"] + assert_instr: [raddhnt] + n_variant_op: op2 + compose: + - LLVMLink: { name: "raddhnt.{sve_type[0]}" } + + - name: svcadd[_{type}] + attr: [*sve-unstable] + doc: Complex add with rotate + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64, u8, u16, u32, u64] + static_defs: ["const IMM_ROTATION: i32"] + constraints: [{ variable: IMM_ROTATION, any_values: [90, 270] }] + assert_instr: [[cadd, "IMM_ROTATION = 90"]] + compose: + - LLVMLink: + name: cadd.x.{sve_type} + arguments: ["op1: {sve_type}", "op2: {sve_type}", "imm_rotation: i32"] + - FnCall: ["{llvm_link}", [$op1, $op2, $IMM_ROTATION]] + + - name: svcdot[_{type[0]}] + attr: [*sve-unstable] + doc: Complex dot product + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[i32, i8], [i64, i16]] + static_defs: ["const IMM_ROTATION: i32"] + constraints: [{ variable: IMM_ROTATION, any_values: [0, 90, 180, 270] }] + assert_instr: [[cdot, "IMM_ROTATION = 90"]] + compose: + - LLVMLink: + name: cdot.{sve_type[0]} + arguments: + - "op1: {sve_type[0]}" + - "op2: {sve_type[1]}" + - "op3: {sve_type[1]}" + - "imm_rotation: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_ROTATION]] + + - name: svcdot_lane[_{type[0]}] + attr: [*sve-unstable] + doc: Complex dot product + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[i32, i8], [i64, i16]] + static_defs: ["const IMM_INDEX: i32", "const IMM_ROTATION: i32"] + constraints: + - { variable: IMM_INDEX, vec_max_elems_type: "{type[0]}" } + - { variable: IMM_ROTATION, any_values: [0, 90, 180, 270] } + assert_instr: [[cdot, "IMM_INDEX = 0, IMM_ROTATION = 90"]] + compose: + - LLVMLink: + name: cdot.lane.{sve_type[0]} + arguments: + - "op1: {sve_type[0]}" + - "op2: {sve_type[1]}" + - "op3: {sve_type[1]}" + - "imm_index: i32" + - "imm_rotation: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX, $IMM_ROTATION]] + + - name: svcmla[_{type}] + attr: [*sve-unstable] + doc: Complex multiply-add with rotate + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64, u8, u16, u32, u64] + static_defs: ["const IMM_ROTATION: i32"] + constraints: [{ variable: IMM_ROTATION, any_values: [0, 90, 180, 270] }] + assert_instr: [[cmla, "IMM_ROTATION = 90"]] + compose: + - LLVMLink: + name: cmla.x.{sve_type} + arguments: + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + - "imm_rotation: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_ROTATION]] + + - name: svcmla_lane[_{type}] + attr: [*sve-unstable] + doc: Complex multiply-add with rotate + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + types: [i16, i32, u16, u32] + static_defs: ["const IMM_INDEX: i32", "const IMM_ROTATION: i32"] + constraints: + - variable: IMM_INDEX + range: { match_size: "{type}", default: [0, 1], halfword: [0, 3] } + - { variable: IMM_ROTATION, any_values: [0, 90, 180, 270] } + assert_instr: [[cmla, "IMM_INDEX = 0, IMM_ROTATION = 90"]] + compose: + - LLVMLink: + name: cmla.lane.x.{sve_type} + arguments: + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + - "imm_index: i32" + - "imm_rotation: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX, $IMM_ROTATION]] + + - name: svqrdcmlah[_{type}] + attr: [*sve-unstable] + doc: Saturating rounding doubling complex multiply-add high with rotate + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64] + static_defs: ["const IMM_ROTATION: i32"] + constraints: [{ variable: IMM_ROTATION, any_values: [0, 90, 180, 270] }] + assert_instr: [[sqrdcmlah, "IMM_ROTATION = 90"]] + compose: + - LLVMLink: + name: sqrdcmlah.x.{sve_type} + arguments: + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + - "imm_rotation: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_ROTATION]] + + - name: svqrdcmlah_lane[_{type}] + attr: [*sve-unstable] + doc: Saturating rounding doubling complex multiply-add high with rotate + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + types: [i16, i32] + static_defs: ["const IMM_INDEX: i32", "const IMM_ROTATION: i32"] + constraints: + - variable: IMM_INDEX + range: { match_size: "{type}", default: [0, 1], halfword: [0, 3] } + - { variable: IMM_ROTATION, any_values: [0, 90, 180, 270] } + assert_instr: [[sqrdcmlah, "IMM_INDEX = 0, IMM_ROTATION = 90"]] + compose: + - LLVMLink: + name: sqrdcmlah.lane.x.{sve_type} + arguments: + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + - "imm_index: i32" + - "imm_rotation: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX, $IMM_ROTATION]] + + - name: svqcadd[_{type}] + attr: [*sve-unstable] + doc: Saturating complex add with rotate + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64] + static_defs: ["const IMM_ROTATION: i32"] + constraints: [{ variable: "IMM_ROTATION", any_values: [90, 270] }] + assert_instr: [[sqcadd, "IMM_ROTATION = 90"]] + compose: + - LLVMLink: + name: "sqcadd.x.{sve_type}" + arguments: ["op1: {sve_type}", "op2: {sve_type}", "imm_rotation: i32"] + - FnCall: ["{llvm_link}", ["$op1", "$op2", "$IMM_ROTATION"]] + + - name: svsublb[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Subtract long (bottom) + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}sublb"] + n_variant_op: op2 + compose: + - LLVMLink: + name: "{type_kind[0].su}sublb.{sve_type[0]}" + + - name: svsublbt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Subtract long (bottom - top) + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + assert_instr: [ssublbt] + n_variant_op: op2 + compose: + - LLVMLink: + name: "ssublbt.{sve_type[0]}" + + - name: svsublt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Subtract long (top) + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}sublt"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind[0].su}sublt.{sve_type[0]}" } + + - name: svsubltb[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Subtract long (top - bottom) + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + assert_instr: [ssubltb] + n_variant_op: op2 + compose: + - LLVMLink: + name: "ssubltb.{sve_type[0]}" + + - name: svsubwb[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Subtract wide (bottom) + arguments: ["op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}subwb"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind[0].su}subwb.{sve_type[0]}" } + + - name: svsubwt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Subtract wide (top) + arguments: ["op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}subwt"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind[0].su}subwt.{sve_type[0]}" } + + - name: svrsubhnb[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Rounding subtract narrow high part (bottom) + arguments: ["op1: {sve_type[0]}", "op2: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: [rsubhnb] + n_variant_op: op2 + compose: + - LLVMLink: { name: "rsubhnb.{sve_type[0]}" } + + - name: svrsubhnt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Rounding subtract narrow high part (top) + arguments: + ["even: {sve_type[1]}", "op1: {sve_type[0]}", "op2: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: [rsubhnt] + n_variant_op: op2 + compose: + - LLVMLink: { name: "rsubhnt.{sve_type[0]}" } + + - name: svsubhnb[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Subtract narrow high part (bottom) + arguments: ["op1: {sve_type[0]}", "op2: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: [subhnb] + n_variant_op: op2 + compose: + - LLVMLink: { name: "subhnb.{sve_type[0]}" } + + - name: svsubhnt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Subtract narrow high part (top) + arguments: + ["even: {sve_type[1]}", "op1: {sve_type[0]}", "op2: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: [subhnt] + n_variant_op: op2 + compose: + - LLVMLink: { name: "subhnt.{sve_type[0]}" } + + - name: svsbclb[{_n}_{type}] + attr: [*sve-unstable] + doc: Subtract with borrow long (bottom) + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + types: [u32, u64] + assert_instr: [sbclb] + n_variant_op: op3 + compose: + - LLVMLink: { name: "sbclb.{sve_type}" } + + - name: svsbclt[{_n}_{type}] + attr: [*sve-unstable] + doc: Subtract with borrow long (top) + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + types: [u32, u64] + assert_instr: [sbclt] + n_variant_op: op3 + compose: + - LLVMLink: { name: "sbclt.{sve_type}" } + + - name: svqsub[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Saturating subtract + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64, u8, u16, u32, u64] + zeroing_method: { select: op1 } + assert_instr: ["{type_kind.su}qsub"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.su}qsub.{sve_type}" } + + - name: svqsubr[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Saturating subtract reversed + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64, u8, u16, u32, u64] + zeroing_method: { select: op1 } + assert_instr: ["{type_kind.su}qsubr"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.su}qsubr.{sve_type}" } + + - name: svhsub[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Halving subtract + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["{type_kind.su}hsub"] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.su}hsub.{sve_type}" } + + - name: svhsubr[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Halving subtract reversed + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["{type_kind.su}hsub"] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.su}hsubr.{sve_type}" } + + - name: svwhilege_{sve_type[1]}[_{type[0]}] + attr: [*sve-unstable] + doc: While decrementing scalar is greater than or equal to + arguments: ["op1: {type[0]}", "op2: {type[0]}"] + return_type: "{sve_type[1]}" + types: [[[i32, i64, u32, u64], [b8, b16, b32, b64]]] + assert_instr: [{ default: whilege, unsigned: whilehs }] + compose: + - MatchKind: + - "{type[0]}" + - default: { LLVMLink: { name: "whilege.{sve_type[1]}.{type[0]}" } } + unsigned: { LLVMLink: { name: "whilehs.{sve_type[1]}.{type[0]}" } } + + - name: svwhilegt_{sve_type[1]}[_{type[0]}] + attr: [*sve-unstable] + doc: While decrementing scalar is greater than + arguments: ["op1: {type[0]}", "op2: {type[0]}"] + return_type: "{sve_type[1]}" + types: [[[i32, i64, u32, u64], [b8, b16, b32, b64]]] + assert_instr: [{ default: whilegt, unsigned: whilehi }] + compose: + - MatchKind: + - "{type[0]}" + - default: { LLVMLink: { name: "whilegt.{sve_type[1]}.{type[0]}" } } + unsigned: { LLVMLink: { name: "whilehi.{sve_type[1]}.{type[0]}" } } + + - name: svwhilerw_{size}ptr + attr: [*sve-unstable] + safety: + unsafe: [] + visibility: private + static_defs: [T] + substitutions: + size_alt: + match_size: "{type}" + byte: b + halfword: h + default: s + doubleword: d + arguments: ["op1: *T", "op2: *T"] + return_type: "{predicate}" + types: [i8, i16, i32, i64] + assert_instr: [] + compose: + - Let: [op1, CastAs: [$op1, "*const crate::ffi::c_void"]] + - Let: [op2, CastAs: [$op2, "*const crate::ffi::c_void"]] + - LLVMLink: + name: "whilerw.{size_alt}.{predicate}.p0" + arguments: ["op1: *crate::ffi::c_void", "op2: *crate::ffi::c_void"] + + - name: svwhilerw[_{type}] + attr: [*sve-unstable] + doc: While free of read-after-write conflicts + # TODO: This might be safe even with unrelated pointers, but the LLVM builtin's guarantees don't + # seem to be documented, so we conservatively keep this unsafe for now. + safety: + unsafe: + - custom: "[`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints + must be met for at least the base pointers, `op1` and `op2`." + arguments: ["op1: *{type}", "op2: *{type}"] + return_type: "svbool_t" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [whilerw] + compose: + - FnCall: + - "svwhilerw_{size}ptr" + - - $op1 + - $op2 + - - Type: "{type}" + + - name: svwhilewr_{size}ptr + attr: [*sve-unstable] + safety: + unsafe: [] + visibility: private + static_defs: [T] + substitutions: + size_alt: + match_size: "{type}" + byte: b + halfword: h + default: s + doubleword: d + arguments: ["op1: *T", "op2: *T"] + return_type: "{predicate}" + types: [i8, i16, i32, i64] + assert_instr: [] + compose: + - Let: [op1, CastAs: [$op1, "*const crate::ffi::c_void"]] + - Let: [op2, CastAs: [$op2, "*const crate::ffi::c_void"]] + - LLVMLink: + name: "whilewr.{size_alt}.{predicate}.p0" + arguments: ["op1: *crate::ffi::c_void", "op2: *crate::ffi::c_void"] + + - name: svwhilewr[_{type}] + attr: [*sve-unstable] + doc: While free of write-after-read conflicts + # TODO: This might be safe even with unrelated pointers, but the LLVM builtin's guarantees don't + # seem to be documented, so we conservatively keep this unsafe for now. + safety: + unsafe: + - custom: "[`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints + must be met for at least the base pointers, `op1` and `op2`." + arguments: ["op1: *{type}", "op2: *{type}"] + return_type: "svbool_t" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [whilewr] + compose: + - FnCall: + - "svwhilewr_{size}ptr" + - - $op1 + - $op2 + - - Type: "{type}" + + - name: svtbl2[_{type[0]}] + attr: [*sve-unstable] + doc: Table lookup in two-vector table + arguments: ["data: {sve_type_x2[0]}", "indices: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [f32, u32] + - [f64, u64] + - [i8, u8] + - [i16, u16] + - [i32, u32] + - [i64, u64] + - [u8, u8] + - [u16, u16] + - [u32, u32] + - [u64, u64] + assert_instr: [tbl] + compose: + - LLVMLink: + name: "tbl2.{sve_type[0]}" + arguments: + - "data0: {sve_type[0]}" + - "data1: {sve_type[0]}" + - "indices: {sve_type[1]}" + - FnCall: + - "{llvm_link}" + - - FnCall: ["svget2_{type[0]}", ["$data"], [0]] + - FnCall: ["svget2_{type[0]}", ["$data"], [1]] + - $indices + + - name: svtbx[_{type[0]}] + attr: [*sve-unstable] + doc: Table lookup in single-vector table (merging) + arguments: + - "fallback: {sve_type[0]}" + - "data: {sve_type[0]}" + - "indices: {sve_type[1]}" + return_type: "{sve_type[0]}" + types: + - [f32, u32] + - [f64, u64] + - [i8, u8] + - [i16, u16] + - [i32, u32] + - [i64, u64] + - [u8, u8] + - [u16, u16] + - [u32, u32] + - [u64, u64] + assert_instr: [tbx] + compose: + - LLVMLink: { name: "tbx.{sve_type[0]}" } + + - name: svcvtlt_{type[0]}[_{type[1]}]_m + attr: [*sve-unstable] + doc: Up convert long (top) + arguments: + ["inactive: {sve_type[0]}", "pg: {max_predicate}", "op: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[f64, f32]] + assert_instr: [fcvtlt] + compose: + - LLVMLink: { name: "fcvtlt.{type[0]}{type[1]}" } + + - name: svcvtlt_{type[0]}[_{type[1]}]_x + attr: [*sve-unstable] + doc: Up convert long (top) + arguments: ["pg: svbool_t", "op: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[f64, f32]] + assert_instr: [fcvtlt] + compose: + - FnCall: + - "svcvtlt_{type[0]}_{type[1]}_m" + - - FnCall: ["crate::intrinsics::transmute_unchecked", [$op], [], true] + - $pg + - $op + + - name: svcvtnt_{type[0]}[_{type[1]}]{_mx} + attr: [*sve-unstable] + doc: Down convert and narrow (top) + arguments: + ["even: {sve_type[0]}", "pg: {max_predicate}", "op: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[f32, f64]] + assert_instr: [fcvtnt] + compose: + - LLVMLink: { name: "fcvtnt.{type[0]}{type[1]}" } + + - name: svcvtx_{type[0]}[_{type[1]}]{_mxz} + attr: [*sve-unstable] + doc: Down convert, rounding to odd + arguments: + ["inactive: {sve_type[0]}", "pg: {max_predicate}", "op: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[f32, f64]] + zeroing_method: { drop: inactive } + assert_instr: [fcvtx] + compose: + - LLVMLink: { name: "fcvtx.{type[0]}{type[1]}" } + + - name: svcvtxnt_{type[0]}[_{type[1]}]{_mx} + attr: [*sve-unstable] + doc: Down convert, rounding to odd (top) + arguments: + ["even: {sve_type[0]}", "pg: {max_predicate}", "op: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[f32, f64]] + assert_instr: [fcvtxnt] + compose: + - LLVMLink: { name: "fcvtxnt.{type[0]}{type[1]}" } + + - name: svldnt1_gather_[{type[0]}]index[_{type[1]}] + attr: [*sve-unstable] + doc: Unextended load, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - non_temporal + arguments: + ["pg: {predicate[0]}", "base: *{type[1]}", "indices: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [[i64, u64], [f64, i64, u64]] + assert_instr: ["ldnt1{size_literal[0]}"] + test: { load: 1 } + compose: + - LLVMLink: { name: "ldnt1.gather.index.{sve_type[1]}" } + + - name: svldnt1_gather_[{type[0]}]offset[_{type[1]}] + attr: [*sve-unstable] + doc: Unextended load, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - non_temporal + arguments: + ["pg: {predicate[0]}", "base: *{type[1]}", "offsets: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [u32, [f32, i32, u32]] + - [[i64, u64], [f64, i64, u64]] + assert_instr: ["ldnt1{size_literal[0]}"] + test: { load: 1 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: { name: "ldnt1.gather.uxtw.{sve_type[1]}" } + doubleword: + LLVMLink: { name: "ldnt1.gather.{sve_type[1]}" } + + - name: svldnt1_gather[_{type[0]}base]_offset_{type[1]} + attr: [*sve-unstable] + doc: Unextended load, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + - non_temporal + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "offset: i64"] + return_type: "{sve_type[1]}" + types: + - [u32, [f32, i32, u32]] + - [u64, [f64, i64, u64]] + assert_instr: ["ldnt1{size_literal[0]}"] + test: { load: 1 } + compose: + - LLVMLink: + name: "ldnt1.gather.scalar.offset.{sve_type[1]}.{sve_type[0]}" + + - name: svldnt1_gather[_{type[0]}base]_{type[1]} + attr: [*sve-unstable] + doc: Unextended load, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + - non_temporal + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [u32, [f32, i32, u32]] + - [u64, [f64, i64, u64]] + assert_instr: ["ldnt1{size_literal[0]}"] + test: { load: 1 } + compose: + - FnCall: + - "svldnt1_gather_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - 0 + + - name: svldnt1_gather[_{type[0]}base]_index_{type[1]} + attr: [*sve-unstable] + doc: Unextended load, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + - non_temporal + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "index: i64"] + return_type: "{sve_type[1]}" + types: + - [u32, [f32, i32, u32]] + - [u64, [f64, i64, u64]] + assert_instr: ["ldnt1{size_literal[0]}"] + test: { load: 1 } + compose: + - FnCall: + - "svldnt1_gather_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - MethodCall: [$index, unchecked_shl, ["{size_in_bytes_log2[0]}"]] + + - name: svldnt1s{size_literal[2]}_gather_[{type[0]}]index_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and sign-extend, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - non_temporal + arguments: + ["pg: {predicate[0]}", "base: *{type[2]}", "indices: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [[i64, u64], [i64, u64], [i16, i32]] + assert_instr: ["ldnt1s{size_literal[2]}"] + test: { load: 2 } + compose: + - LLVMLink: + name: "ldnt1.gather.index.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + - FnCall: + - "crate::intrinsics::simd::simd_cast" + - - FnCall: ["{llvm_link}", [$pg, $base, $indices]] + + - name: svldnt1u{size_literal[2]}_gather_[{type[0]}]index_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and zero-extend, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - non_temporal + arguments: + ["pg: {predicate[0]}", "base: *{type[2]}", "indices: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [[i64, u64], [u64, i64], [u16, u32]] + assert_instr: ["ldnt1{size_literal[2]}"] + test: { load: 2 } + compose: + - LLVMLink: + name: "ldnt1.gather.index.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + - FnCall: + - "crate::intrinsics::simd::simd_cast" + - - FnCall: ["{llvm_link}", [$pg, $base, $indices]] + - - Type: "{sve_type[1] as {type[2]}}" + - _ + + - name: svldnt1s{size_literal[2]}_gather_[{type[0]}]offset_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and sign-extend, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - non_temporal + arguments: + ["pg: {predicate[0]}", "base: *{type[2]}", "offsets: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [u32, [i32, u32], [i8, i16]] + - [[i64, u64], [i64, u64], [i8, i16, i32]] + assert_instr: ["ldnt1s{size_literal[2]}"] + test: { load: 2 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "ldnt1.gather.uxtw.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + doubleword: + LLVMLink: + name: "ldnt1.gather.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + - FnCall: + - "crate::intrinsics::simd::simd_cast" + - - FnCall: ["{llvm_link}", [$pg, $base, $offsets]] + + - name: svldnt1u{size_literal[2]}_gather_[{type[0]}]offset_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and zero-extend, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - non_temporal + arguments: + ["pg: {predicate[0]}", "base: *{type[2]}", "offsets: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [u32, [u32, i32], [u8, u16]] + - [[i64, u64], [u64, i64], [u8, u16, u32]] + assert_instr: ["ldnt1{size_literal[2]}"] + test: { load: 2 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "ldnt1.gather.uxtw.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + doubleword: + LLVMLink: + name: "ldnt1.gather.{sve_type[1] as {type[2]}}" + return_type: "{sve_type[1] as {type[2]}}" + - FnCall: + - "crate::intrinsics::simd::simd_cast" + - - FnCall: ["{llvm_link}", [$pg, $base, $offsets]] + - - Type: "{sve_type[1] as {type[2]}}" + - _ + + - name: svldnt1s{size_literal[2]}_gather[_{type[0]}base]_offset_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and sign-extend, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + - non_temporal + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "offset: i64"] + return_type: "{sve_type[1]}" + types: + - [u32, [i32, u32], [i8, i16]] + - [u64, [i64, u64], [i8, i16, i32]] + assert_instr: ["ldnt1s{size_literal[2]}"] + test: { load: 2 } + compose: + - LLVMLink: + name: "ldnt1.gather.scalar.offset.{sve_type[1] as {type[2]}}.{sve_type[0]}" + return_type: "{sve_type[1] as {type[2]}}" + - FnCall: + - "crate::intrinsics::simd::simd_cast" + - - FnCall: ["{llvm_link}", [$pg, $bases, $offset]] + + - name: svldnt1u{size_literal[2]}_gather[_{type[0]}base]_offset_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and zero-extend, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + - non_temporal + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "offset: i64"] + return_type: "{sve_type[1]}" + types: + - [u32, [u32, i32], [u8, u16]] + - [u64, [u64, i64], [u8, u16, u32]] + assert_instr: ["ldnt1{size_literal[2]}"] + test: { load: 2 } + compose: + - LLVMLink: + name: "ldnt1.gather.scalar.offset.{sve_type[1] as {type[2]}}.{sve_type[0]}" + return_type: "{sve_type[1] as {type[2]}}" + - FnCall: + - "crate::intrinsics::simd::simd_cast" + - - FnCall: ["{llvm_link}", [$pg, $bases, $offset]] + - - Type: "{sve_type[1] as {type[2]}}" + - _ + + - name: svldnt1s{size_literal[2]}_gather[_{type[0]}base]_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and sign-extend, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + - non_temporal + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [u32, [i32, u32], [i8, i16]] + - [u64, [i64, u64], [i8, i16, i32]] + assert_instr: ["ldnt1s{size_literal[2]}"] + test: { load: 2 } + compose: + - FnCall: + - "svldnt1s{size_literal[2]}_gather_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - 0 + + - name: svldnt1u{size_literal[2]}_gather[_{type[0]}base]_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and zero-extend, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + - non_temporal + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [u32, [i32, u32], [u8, u16]] + - [u64, [i64, u64], [u8, u16, u32]] + assert_instr: ["ldnt1{size_literal[2]}"] + test: { load: 2 } + compose: + - FnCall: + - "svldnt1u{size_literal[2]}_gather_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - 0 + + - name: svldnt1s{size_literal[2]}_gather[_{type[0]}base]_index_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and sign-extend, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + - non_temporal + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "index: i64"] + return_type: "{sve_type[1]}" + types: + - [u32, [i32, u32], u16] + - [u64, [i64, u64], [u16, u32]] + assert_instr: ["ldnt1s{size_literal[2]}"] + test: { load: 2 } + compose: + - FnCall: + - "svldnt1s{size_literal[2]}_gather_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - MethodCall: [$index, unchecked_shl, ["{size_in_bytes_log2[2]}"]] + + - name: svldnt1u{size_literal[2]}_gather[_{type[0]}base]_index_{type[1]} + attr: [*sve-unstable] + doc: Load {size[2]}-bit data and zero-extend, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + - non_temporal + arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "index: i64"] + return_type: "{sve_type[1]}" + types: + - [u32, [i32, u32], u16] + - [u64, [i64, u64], [u16, u32]] + assert_instr: ["ldnt1{size_literal[2]}"] + test: { load: 2 } + compose: + - FnCall: + - "svldnt1u{size_literal[2]}_gather_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - MethodCall: [$index, unchecked_shl, ["{size_in_bytes_log2[2]}"]] + + - name: svstnt1_scatter_[{type[0]}]index[_{type[1]}] + attr: [*sve-unstable] + doc: Non-truncating store, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - non_temporal + arguments: + - "pg: {predicate[0]}" + - "base: *mut {type[1]}" + - "indices: {sve_type[0]}" + - "data: {sve_type[1]}" + types: + - [[i64, u64], [f64, i64, u64]] + assert_instr: ["stnt1{size_literal[0]}"] + test: { store: 1 } + compose: + - LLVMLink: + name: "stnt1.scatter.index.{sve_type[1]}" + arguments: + - "data: {sve_type[1]}" + - "pg: {predicate[0]}" + - "base: *mut {type[1]}" + - "indices: {sve_type[0]}" + - FnCall: ["{llvm_link}", [$data, $pg, $base, $indices]] + + - name: svstnt1_scatter_[{type[0]}]offset[_{type[1]}] + attr: [*sve-unstable] + doc: Non-truncating store, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - non_temporal + arguments: + - "pg: {predicate[0]}" + - "base: *mut {type[1]}" + - "offsets: {sve_type[0]}" + - "data: {sve_type[1]}" + types: + - [u32, [f32, i32, u32]] + - [[i64, u64], [f64, i64, u64]] + assert_instr: ["stnt1{size_literal[0]}"] + test: { store: 1 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "stnt1.scatter.uxtw.{sve_type[1]}" + arguments: + - "data: {sve_type[1]}" + - "pg: {predicate[0]}" + - "base: *mut {type[1]}" + - "offsets: {sve_type[0]}" + doubleword: + LLVMLink: + name: "stnt1.scatter.{sve_type[1]}" + arguments: + - "data: {sve_type[1]}" + - "pg: {predicate[0]}" + - "base: *mut {type[1]}" + - "offsets: {sve_type[0]}" + - FnCall: ["{llvm_link}", [$data, $pg, $base, $offsets]] + + - name: svstnt1_scatter[_{type[0]}base]_offset[_{type[1]}] + attr: [*sve-unstable] + doc: Non-truncating store, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + - non_temporal + arguments: + - "pg: {predicate[0]}" + - "bases: {sve_type[0]}" + - "offset: i64" + - "data: {sve_type[1]}" + types: + - [u32, [f32, i32, u32]] + - [u64, [f64, i64, u64]] + assert_instr: ["stnt1{size_literal[0]}"] + test: { store: 1 } + compose: + - LLVMLink: + arguments: + - "data: {sve_type[1]}" + - "pg: {predicate[0]}" + - "bases: {sve_type[0]}" + - "offset: i64" + name: "stnt1.scatter.scalar.offset.{sve_type[1]}.{sve_type[0]}" + - FnCall: ["{llvm_link}", [$data, $pg, $bases, $offset]] + + - name: svstnt1_scatter[_{type[0]}base_{type[1]}] + attr: [*sve-unstable] + doc: Non-truncating store, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + - non_temporal + arguments: + ["pg: {predicate[0]}", "bases: {sve_type[0]}", "data: {sve_type[1]}"] + types: + - [u32, [f32, i32, u32]] + - [u64, [f64, i64, u64]] + assert_instr: ["stnt1{size_literal[0]}"] + test: { store: 1 } + compose: + - FnCall: + - "svstnt1_scatter_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - 0 + - $data + + - name: svstnt1_scatter[_{type[0]}base]_index[_{type[1]}] + attr: [*sve-unstable] + doc: Non-truncating store, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + - non_temporal + arguments: + - "pg: {predicate[0]}" + - "bases: {sve_type[0]}" + - "index: i64" + - "data: {sve_type[1]}" + types: + - [u32, [f32, i32, u32]] + - [u64, [f64, i64, u64]] + assert_instr: ["stnt1{size_literal[0]}"] + test: { store: 1 } + compose: + - FnCall: + - "svstnt1_scatter_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - MethodCall: [$index, unchecked_shl, ["{size_in_bytes_log2[0]}"]] + - $data + + - name: svstnt1{size_literal[2]}_scatter_[{type[0]}]index[_{type[1]}] + attr: [*sve-unstable] + doc: Truncate to {size[2]} bits and store, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - non_temporal + arguments: + - "pg: {predicate[0]}" + - "base: *mut {type[2]}" + - "indices: {sve_type[0]}" + - "data: {sve_type[1]}" + types: + - [[i64, u64], i64, [i16, i32]] + - [[i64, u64], u64, [u16, u32]] + assert_instr: ["stnt1{size_literal[2]}"] + test: { store: 2 } + compose: + - LLVMLink: + name: "stnt1.scatter.index.{sve_type[1] as {type[2]}}" + arguments: + - "data: {sve_type[1] as {type[2]}}" + - "pg: {predicate[0]}" + - "base: *mut {type[2]}" + - "indices: {sve_type[0]}" + - FnCall: + - "{llvm_link}" + - [FnCall: ["crate::intrinsics::simd::simd_cast", [$data]], $pg, $base, $indices] + + - name: svstnt1{size_literal[2]}_scatter_[{type[0]}]offset[_{type[1]}] + attr: [*sve-unstable] + doc: Truncate to {size[2]} bits and store, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - non_temporal + arguments: + - "pg: {predicate[0]}" + - "base: *mut {type[2]}" + - "offsets: {sve_type[0]}" + - "data: {sve_type[1]}" + types: + - [u32, i32, [i8, i16]] + - [u32, u32, [u8, u16]] + - [[i64, u64], i64, [i8, i16, i32]] + - [[i64, u64], u64, [u8, u16, u32]] + assert_instr: ["stnt1{size_literal[2]}"] + test: { store: 2 } + compose: + - MatchSize: + - "{type[0]}" + - default: + LLVMLink: + name: "stnt1.scatter.uxtw.{sve_type[1] as {type[2]}}" + arguments: + - "data: {sve_type[1] as {type[2]}}" + - "pg: {predicate[0]}" + - "base: *mut {type[2]}" + - "offsets: {sve_type[0]}" + doubleword: + LLVMLink: + name: "stnt1.scatter.{sve_type[1] as {type[2]}}" + arguments: + - "data: {sve_type[1] as {type[2]}}" + - "pg: {predicate[0]}" + - "base: *mut {type[2]}" + - "offsets: {sve_type[0]}" + - FnCall: + - "{llvm_link}" + - [FnCall: ["crate::intrinsics::simd::simd_cast", [$data]], $pg, $base, $offsets] + + - name: svstnt1{size_literal[2]}_scatter[_{type[0]}base]_offset[_{type[1]}] + attr: [*sve-unstable] + doc: Truncate to {size[2]} bits and store, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + - non_temporal + arguments: + - "pg: {predicate[0]}" + - "bases: {sve_type[0]}" + - "offset: i64" + - "data: {sve_type[1]}" + types: + - [u32, [i32, u32], [i8, i16]] + - [u64, [i64, u64], [i8, i16, i32]] + assert_instr: ["stnt1{size_literal[2]}"] + test: { store: 2 } + compose: + - LLVMLink: + name: "stnt1.scatter.scalar.offset.{sve_type[1] as {type[2]}}.{sve_type[0]}" + arguments: + - "data: {sve_type[1] as {type[2]}}" + - "pg: {predicate[0]}" + - "bases: {sve_type[0]}" + - "offset: i64" + - FnCall: + - "{llvm_link}" + - [FnCall: ["crate::intrinsics::simd::simd_cast", [$data]], $pg, $bases, $offset] + + - name: svstnt1{size_literal[2]}_scatter[_{type[0]}base_{type[1]}] + attr: [*sve-unstable] + doc: Truncate to {size[2]} bits and store, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + - non_temporal + arguments: + ["pg: {predicate[0]}", "bases: {sve_type[0]}", "data: {sve_type[1]}"] + types: + - [u32, [i32, u32], [i8, i16]] + - [u64, [i64, u64], [i8, i16, i32]] + assert_instr: ["stnt1{size_literal[2]}"] + test: { store: 2 } + compose: + - FnCall: + - "svstnt1{size_literal[2]}_scatter_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - 0 + - $data + + - name: svstnt1{size_literal[2]}_scatter[_{type[0]}base]_index[_{type[1]}] + attr: [*sve-unstable] + doc: Truncate to {size[2]} bits and store, non-temporal + safety: + unsafe: + - pointer_offset: predicated + - dereference: predicated + - no_provenance: bases + - non_temporal + arguments: + - "pg: {predicate[0]}" + - "bases: {sve_type[0]}" + - "index: i64" + - "data: {sve_type[1]}" + types: + - [u32, [i32, u32], i16] + - [u64, [i64, u64], [i16, i32]] + assert_instr: ["stnt1{size_literal[2]}"] + test: { store: 2 } + compose: + - FnCall: + - "svstnt1{size_literal[2]}_scatter_{type[0]}base_offset_{type[1]}" + - - $pg + - $bases + - MethodCall: [$index, unchecked_shl, ["{size_in_bytes_log2[2]}"]] + - $data + + - name: svaba[{_n}_{type}] + attr: [*sve-unstable] + doc: Absolute difference and accumulate + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["{type_kind}aba"] + n_variant_op: op3 + compose: + - LLVMLink: { name: "{type_kind}aba.{sve_type}" } + + - name: svqabs[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Saturating absolute value + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64] + assert_instr: [sqabs] + zeroing_method: { drop: inactive } + compose: + - LLVMLink: { name: "sqabs.{sve_type}" } + + - name: svabdlb[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Absolute difference long (bottom) + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}abdlb"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind[0].su}abdlb.{sve_type[0]}" } + + - name: svabdlt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Absolute difference long (top) + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}abdlt"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind[0].su}abdlt.{sve_type[0]}" } + + - name: svabalb[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Absolute difference long (bottom) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}abalb"] + n_variant_op: op3 + compose: + - LLVMLink: { name: "{type_kind[0].su}abalb.{sve_type[0]}" } + + - name: svabalt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Absolute difference long (top) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}abalt"] + n_variant_op: op3 + compose: + - LLVMLink: { name: "{type_kind[0].su}abalt.{sve_type[0]}" } + + - name: svbcax[{_n}_{type}] + attr: [*sve-unstable] + doc: Bitwise clear and exclusive OR + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + assert_instr: [bcax] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + n_variant_op: op3 + compose: + - LLVMLink: { name: "bcax.{sve_type}" } + + - name: sveorbt[{_n}_{type}] + attr: [*sve-unstable] + doc: Interleaving exclusive OR (bottom, top) + arguments: ["odd: {sve_type}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + assert_instr: [eorbt] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + n_variant_op: op2 + compose: + - LLVMLink: { name: "eorbt.{sve_type}" } + + - name: sveortb[{_n}_{type}] + attr: [*sve-unstable] + doc: Interleaving exclusive OR (top, bottom) + arguments: ["even: {sve_type}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + assert_instr: [eortb] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + n_variant_op: op2 + compose: + - LLVMLink: { name: "eortb.{sve_type}" } + + - name: sveor3[{_n}_{type}] + attr: [*sve-unstable] + doc: Bitwise exclusive OR of three vectors + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + assert_instr: [eor3] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + n_variant_op: op3 + compose: + - LLVMLink: { name: "eor3.{sve_type}" } + + - name: svbsl[{_n}_{type}] + attr: [*sve-unstable] + doc: Bitwise select + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + assert_instr: [bsl] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + n_variant_op: op3 + compose: + - LLVMLink: { name: "bsl.{sve_type}" } + + - name: svbsl1n[{_n}_{type}] + attr: [*sve-unstable] + doc: Bitwise select with first input inverted + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + assert_instr: [bsl1n] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + n_variant_op: op3 + compose: + - LLVMLink: { name: "bsl1n.{sve_type}" } + + - name: svbsl2n[{_n}_{type}] + attr: [*sve-unstable] + doc: Bitwise select with second input inverted + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + assert_instr: [bsl2n] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + n_variant_op: op3 + compose: + - LLVMLink: { name: "bsl2n.{sve_type}" } + + - name: svnbsl[{_n}_{type}] + attr: [*sve-unstable] + doc: Bitwise select + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + assert_instr: [nbsl] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + n_variant_op: op3 + compose: + - LLVMLink: { name: "nbsl.{sve_type}" } + + - name: svxar[_n_{type}] + attr: [*sve-unstable] + doc: Bitwise exclusive OR and rotate right + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + static_defs: ["const IMM3: i32"] + constraints: [{ variable: IMM3, range: ["1", "{size}"] }] + assert_instr: [[xar, "IMM3 = 1"]] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + compose: + - LLVMLink: + name: "xar.{sve_type}" + arguments: ["op1: {sve_type}", "op2: {sve_type}", "imm3: i32"] + - FnCall: ["{llvm_link}", [$op1, $op2, $IMM3]] + + - name: svrax1[_{type}] + attr: [*sve-unstable] + doc: Bitwise rotate left by 1 and exclusive OR + target_features: [sve2-sha3] + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + assert_instr: [rax1] + types: [i64, u64] + compose: + - LLVMLink: { name: "rax1" } + + - name: svshllb[_n_{type[0]}] + attr: [*sve-unstable] + doc: Shift left long (bottom) + arguments: ["op1: {sve_type[1]}"] + return_type: "{sve_type[0]}" + static_defs: ["const IMM2: i32"] + constraints: [{ variable: IMM2, range: ["0", "{size_minus_one[1]}"] }] + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: [["{type_kind[0].su}shllb", "IMM2 = 0"]] + compose: + - LLVMLink: + name: "{type_kind[0].su}shllb.{sve_type[0]}" + arguments: ["op1: {sve_type[1]}", "imm2: i32"] + - FnCall: ["{llvm_link}", [$op1, $IMM2]] + + - name: svshllt[_n_{type[0]}] + attr: [*sve-unstable] + doc: Shift left long (top) + arguments: ["op1: {sve_type[1]}"] + return_type: "{sve_type[0]}" + static_defs: ["const IMM2: i32"] + constraints: [{ variable: IMM2, range: ["0", "{size_minus_one[1]}"] }] + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: [["{type_kind[0].su}shllt", "IMM2 = 0"]] + compose: + - LLVMLink: + name: "{type_kind[0].su}shllt.{sve_type[0]}" + arguments: ["op1: {sve_type[1]}", "imm2: i32"] + - FnCall: ["{llvm_link}", [$op1, $IMM2]] + + - name: svrshl[{_n}_{type[0]}]{_mxz} + attr: [*sve-unstable] + doc: Rounding shift left + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [[i8, u8], i8] + - [[i16, u16], i16] + - [[i32, u32], i32] + - [[i64, u64], i64] + assert_instr: ["{type_kind[0].su}rshl"] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind[0].su}rshl.{sve_type[0]}" } + + - name: svqrshl[{_n}_{type[0]}]{_mxz} + attr: [*sve-unstable] + doc: Saturating rounding shift left + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [[i8, u8], i8] + - [[i16, u16], i16] + - [[i32, u32], i32] + - [[i64, u64], i64] + assert_instr: ["{type_kind[0].su}qrshl"] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind[0].su}qrshl.{sve_type[0]}" } + + - name: svqshl[{_n}_{type[0]}]{_mxz} + attr: [*sve-unstable] + doc: Saturating shift left + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [[i8, u8], i8] + - [[i16, u16], i16] + - [[i32, u32], i32] + - [[i64, u64], i64] + assert_instr: ["{type_kind[0].su}qshl"] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind[0].su}qshl.{sve_type[0]}" } + + - name: svqshlu[_n_{type[0]}]{_mxz} + attr: [*sve-unstable] + doc: Saturating shift left unsigned + arguments: ["pg: {predicate[0]}", "op1: {sve_type[0]}"] + return_type: "{sve_type[1]}" + static_defs: ["const IMM2: i32"] + constraints: [{ variable: IMM2, range: ["0", "{size_minus_one[1]}"] }] + types: + - [i8, u8] + - [i16, u16] + - [i32, u32] + - [i64, u64] + assert_instr: [[sqshlu, "IMM2 = 0"]] + zeroing_method: { select: op1 } + compose: + - LLVMLink: + name: "sqshlu.{sve_type[0]}" + arguments: ["pg: {predicate[0]}", "op1: {sve_type[0]}", "imm2: i32"] + - FnCall: ["{llvm_link}", [$pg, $op1, $IMM2]] + + - name: svsli[_n_{type}] + attr: [*sve-unstable] + doc: Shift left and insert + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + static_defs: ["const IMM3: i32"] + constraints: [{ variable: IMM3, range: ["0", "{size_minus_one}"] }] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [[sli, "IMM3 = 0"]] + compose: + - LLVMLink: + name: "sli.{sve_type}" + arguments: ["op1: {sve_type}", "op2: {sve_type}", "imm3: i32"] + - FnCall: ["{llvm_link}", [$op1, $op2, $IMM3]] + + - name: svrshr[_n_{type}]{_mxz} + attr: [*sve-unstable] + doc: Rounding shift right + arguments: ["pg: {predicate}", "op1: {sve_type}"] + return_type: "{sve_type}" + static_defs: ["const IMM2: i32"] + constraints: [{ variable: IMM2, range: ["1", "{size}"] }] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [["{type_kind.su}rshr", "IMM2 = 1"]] + zeroing_method: { select: op1 } + compose: + - LLVMLink: + name: "{type_kind.su}rshr.{sve_type}" + arguments: ["pg: {predicate}", "op1: {sve_type}", "imm2: i32"] + - FnCall: ["{llvm_link}", [$pg, $op1, $IMM2]] + + - name: svrsra[_n_{type}] + attr: [*sve-unstable] + doc: Rounding shift right and accumulate + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + static_defs: ["const IMM3: i32"] + constraints: [{ variable: IMM3, range: ["1", "{size}"] }] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [["{type_kind.su}rsra", "IMM3 = 1"]] + compose: + - LLVMLink: + name: "{type_kind.su}rsra.{sve_type}" + arguments: ["op1: {sve_type}", "op2: {sve_type}", "imm3: i32"] + - FnCall: ["{llvm_link}", [$op1, $op2, $IMM3]] + + - name: svrshrnb[_n_{type[0]}] + attr: [*sve-unstable] + doc: Rounding shift right narrow (bottom) + arguments: ["op1: {sve_type[0]}"] + return_type: "{sve_type[1]}" + static_defs: ["const IMM2: i32"] + constraints: [{ variable: IMM2, range: ["1", "{size[1]}"] }] + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: [[rshrnb, "IMM2 = 1"]] + compose: + - LLVMLink: + name: "rshrnb.{sve_type[0]}" + arguments: ["op1: {sve_type[0]}", "imm2: i32"] + - FnCall: ["{llvm_link}", [$op1, $IMM2]] + + - name: svrshrnt[_n_{type[0]}] + attr: [*sve-unstable] + doc: Rounding shift right narrow (top) + arguments: ["even: {sve_type[1]}", "op1: {sve_type[0]}"] + return_type: "{sve_type[1]}" + static_defs: ["const IMM2: i32"] + constraints: [{ variable: IMM2, range: ["1", "{size[1]}"] }] + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: [[rshrnt, "IMM2 = 1"]] + compose: + - LLVMLink: + name: "rshrnt.{sve_type[0]}" + arguments: ["even: {sve_type[1]}", "op1: {sve_type[0]}", "imm2: i32"] + - FnCall: ["{llvm_link}", [$even, $op1, $IMM2]] + + - name: svqrshrnb[_n_{type[0]}] + attr: [*sve-unstable] + doc: Saturating rounding shift right narrow (bottom) + arguments: ["op1: {sve_type[0]}"] + return_type: "{sve_type[1]}" + static_defs: ["const IMM2: i32"] + constraints: [{ variable: IMM2, range: ["1", "{size[1]}"] }] + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: [["{type_kind[0].su}qrshrnb", "IMM2 = 1"]] + compose: + - LLVMLink: + name: "{type_kind[0].su}qrshrnb.{sve_type[0]}" + arguments: ["op1: {sve_type[0]}", "imm2: i32"] + - FnCall: ["{llvm_link}", [$op1, $IMM2]] + + - name: svqrshrnt[_n_{type[0]}] + attr: [*sve-unstable] + doc: Saturating rounding shift right narrow (top) + arguments: ["even: {sve_type[1]}", "op1: {sve_type[0]}"] + return_type: "{sve_type[1]}" + static_defs: ["const IMM2: i32"] + constraints: [{ variable: IMM2, range: ["1", "{size[1]}"] }] + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: [["{type_kind[0].su}qrshrnt", "IMM2 = 1"]] + compose: + - LLVMLink: + name: "{type_kind[0].su}qrshrnt.{sve_type[0]}" + arguments: ["even: {sve_type[1]}", "op1: {sve_type[0]}", "imm2: i32"] + - FnCall: ["{llvm_link}", [$even, $op1, $IMM2]] + + - name: svqrshrunb[_n_{type[0]}] + attr: [*sve-unstable] + doc: Saturating rounding shift right unsigned narrow (bottom) + arguments: ["op1: {sve_type[0]}"] + return_type: "{sve_type[1]}" + static_defs: ["const IMM2: i32"] + constraints: [{ variable: IMM2, range: ["1", "{size[1]}"] }] + types: + - [i16, u8] + - [i32, u16] + - [i64, u32] + assert_instr: [[sqrshrunb, "IMM2 = 1"]] + compose: + - LLVMLink: + name: "sqrshrunb.{sve_type[0]}" + arguments: ["op1: {sve_type[0]}", "imm2: i32"] + - FnCall: ["{llvm_link}", [$op1, $IMM2]] + + - name: svqrshrunt[_n_{type[0]}] + attr: [*sve-unstable] + doc: Saturating rounding shift right unsigned narrow (top) + arguments: ["even: {sve_type[1]}", "op1: {sve_type[0]}"] + return_type: "{sve_type[1]}" + static_defs: ["const IMM2: i32"] + constraints: [{ variable: IMM2, range: ["1", "{size[1]}"] }] + types: + - [i16, u8] + - [i32, u16] + - [i64, u32] + assert_instr: [[sqrshrunt, "IMM2 = 1"]] + compose: + - LLVMLink: + name: "sqrshrunt.{sve_type[0]}" + arguments: ["even: {sve_type[1]}", "op1: {sve_type[0]}", "imm2: i32"] + - FnCall: ["{llvm_link}", [$even, $op1, $IMM2]] + + - name: svqshrnb[_n_{type[0]}] + attr: [*sve-unstable] + doc: Saturating shift right narrow (bottom) + arguments: ["op1: {sve_type[0]}"] + return_type: "{sve_type[1]}" + static_defs: ["const IMM2: i32"] + constraints: [{ variable: IMM2, range: ["1", "{size[1]}"] }] + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: [["{type_kind[0].su}qshrnb", "IMM2 = 1"]] + compose: + - LLVMLink: + name: "{type_kind[0].su}qshrnb.{sve_type[0]}" + arguments: ["op1: {sve_type[0]}", "imm2: i32"] + - FnCall: ["{llvm_link}", [$op1, $IMM2]] + + - name: svqshrnt[_n_{type[0]}] + attr: [*sve-unstable] + doc: Saturating shift right narrow (top) + arguments: ["even: {sve_type[1]}", "op1: {sve_type[0]}"] + return_type: "{sve_type[1]}" + static_defs: ["const IMM2: i32"] + constraints: [{ variable: IMM2, range: ["1", "{size[1]}"] }] + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: [["{type_kind[0].su}qshrnt", "IMM2 = 1"]] + compose: + - LLVMLink: + name: "{type_kind[0].su}qshrnt.{sve_type[0]}" + arguments: ["even: {sve_type[1]}", "op1: {sve_type[0]}", "imm2: i32"] + - FnCall: ["{llvm_link}", [$even, $op1, $IMM2]] + + - name: svqshrunb[_n_{type[0]}] + attr: [*sve-unstable] + doc: Saturating shift right unsigned narrow (bottom) + arguments: ["op1: {sve_type[0]}"] + return_type: "{sve_type[1]}" + static_defs: ["const IMM2: i32"] + constraints: [{ variable: IMM2, range: ["1", "{size[1]}"] }] + types: + - [i16, u8] + - [i32, u16] + - [i64, u32] + assert_instr: [[sqshrunb, "IMM2 = 1"]] + compose: + - LLVMLink: + name: "sqshrunb.{sve_type[0]}" + arguments: ["op1: {sve_type[0]}", "imm2: i32"] + - FnCall: ["{llvm_link}", [$op1, $IMM2]] + + - name: svqshrunt[_n_{type[0]}] + attr: [*sve-unstable] + doc: Saturating shift right unsigned narrow (top) + arguments: ["even: {sve_type[1]}", "op1: {sve_type[0]}"] + return_type: "{sve_type[1]}" + static_defs: ["const IMM2: i32"] + constraints: [{ variable: IMM2, range: ["1", "{size[1]}"] }] + types: + - [i16, u8] + - [i32, u16] + - [i64, u32] + assert_instr: [[sqshrunt, "IMM2 = 1"]] + compose: + - LLVMLink: + name: "sqshrunt.{sve_type[0]}" + arguments: ["even: {sve_type[1]}", "op1: {sve_type[0]}", "imm2: i32"] + - FnCall: ["{llvm_link}", [$even, $op1, $IMM2]] + + - name: svsra[_n_{type}] + attr: [*sve-unstable] + doc: Shift right and accumulate + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + static_defs: ["const IMM3: i32"] + constraints: [{ variable: IMM3, range: ["1", "{size}"] }] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [["{type_kind.su}sra", "IMM3 = 1"]] + compose: + - LLVMLink: + name: "{type_kind.su}sra.{sve_type}" + arguments: ["op1: {sve_type}", "op2: {sve_type}", "imm3: i32"] + - FnCall: ["{llvm_link}", [$op1, $op2, $IMM3]] + + - name: svsri[_n_{type}] + attr: [*sve-unstable] + doc: Shift right and insert + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + static_defs: ["const IMM3: i32"] + constraints: [{ variable: IMM3, range: ["1", "{size}"] }] + types: [i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: [[sri, "IMM3 = 1"]] + compose: + - LLVMLink: + name: "sri.{sve_type}" + arguments: ["op1: {sve_type}", "op2: {sve_type}", "imm3: i32"] + - FnCall: ["{llvm_link}", [$op1, $op2, $IMM3]] + + - name: svshrnb[_n_{type[0]}] + attr: [*sve-unstable] + doc: Shift right narrow (bottom) + arguments: ["op1: {sve_type[0]}"] + return_type: "{sve_type[1]}" + static_defs: ["const IMM2: i32"] + constraints: [{ variable: IMM2, range: ["1", "{size[1]}"] }] + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: [[shrnb, "IMM2 = 1"]] + compose: + - LLVMLink: + name: "shrnb.{sve_type[0]}" + arguments: ["op1: {sve_type[0]}", "imm2: i32"] + - FnCall: ["{llvm_link}", [$op1, $IMM2]] + + - name: svshrnt[_n_{type[0]}] + attr: [*sve-unstable] + doc: Shift right narrow (top) + arguments: ["even: {sve_type[1]}", "op1: {sve_type[0]}"] + return_type: "{sve_type[1]}" + static_defs: ["const IMM2: i32"] + constraints: [{ variable: IMM2, range: ["1", "{size[1]}"] }] + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: [[shrnt, "IMM2 = 1"]] + compose: + - LLVMLink: + name: "shrnt.{sve_type[0]}" + arguments: ["even: {sve_type[1]}", "op1: {sve_type[0]}", "imm2: i32"] + - FnCall: ["{llvm_link}", [$even, $op1, $IMM2]] + + - name: svqxtnb[_{type[0]}] + attr: [*sve-unstable] + doc: Saturating extract narrow (bottom) + arguments: ["op: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}qxtnb"] + compose: + - LLVMLink: { name: "{type_kind[0].su}qxtnb.{sve_type[0]}" } + + - name: svqxtnt[_{type[0]}] + attr: [*sve-unstable] + doc: Saturating extract narrow (top) + arguments: ["even: {sve_type[1]}", "op: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}qxtnt"] + compose: + - LLVMLink: { name: "{type_kind[0].su}qxtnt.{sve_type[0]}" } + + - name: svqxtunb[_{type[0]}] + attr: [*sve-unstable] + doc: Saturating extract unsigned narrow (bottom) + arguments: ["op: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [i16, u8] + - [i32, u16] + - [i64, u32] + assert_instr: [sqxtunb] + compose: + - LLVMLink: { name: "sqxtunb.{sve_type[0]}" } + + - name: svqxtunt[_{type[0]}] + attr: [*sve-unstable] + doc: Saturating extract unsigned narrow (top) + arguments: ["even: {sve_type[1]}", "op: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: + - [i16, u8] + - [i32, u16] + - [i64, u32] + assert_instr: [sqxtunt] + compose: + - LLVMLink: { name: "sqxtunt.{sve_type[0]}" } + + - name: svmovlb[_{type[0]}] + attr: [*sve-unstable] + doc: Move long (bottom) + arguments: ["op: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}shllb"] + compose: + - FnCall: ["svshllb_n_{type[0]}", [$op], [0]] + + - name: svmovlt[_{type[0]}] + attr: [*sve-unstable] + doc: Move long (top) + arguments: ["op: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}shllt"] + compose: + - FnCall: ["svshllt_n_{type[0]}", [$op], [0]] + + - name: svunpkhi[_{type[0]}] + attr: [*sve-unstable] + doc: Unpack and extend high half + arguments: ["op: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}unpkhi"] + compose: + - LLVMLink: { name: "{type_kind[0].su}unpkhi.{sve_type[0]}" } + + - name: svunpkhi[_b] + attr: [*sve-unstable] + doc: Unpack and extend high half + arguments: ["op: svbool_t"] + return_type: "svbool8_t" + assert_instr: [punpkhi] + compose: + - LLVMLink: { name: "punpkhi.nxv16i1" } + + - name: svunpklo[_{type[0]}] + attr: [*sve-unstable] + doc: Unpack and extend low half + arguments: ["op: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}unpklo"] + compose: + - LLVMLink: { name: "{type_kind[0].su}unpklo.{sve_type[0]}" } + + - name: svunpklo[_b] + attr: [*sve-unstable] + doc: Unpack and extend low half + arguments: ["op: svbool_t"] + return_type: "svbool8_t" + assert_instr: [punpklo] + compose: + - LLVMLink: { name: "punpklo.nxv16i1" } + + - name: svaddp[_{type}]{_mx} + attr: [*sve-unstable] + doc: Add pairwise + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["{type_kind.f}addp"] + compose: + - LLVMLink: { name: "{type_kind.f}addp.{sve_type}" } + + - name: svadalp[_{type[0]}]{_mxz} + attr: [*sve-unstable] + doc: Add and accumulate long pairwise + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}adalp"] + zeroing_method: { select: op1 } + compose: + - LLVMLink: { name: "{type_kind[0].su}adalp.{sve_type[0]}" } + + - name: svmaxp[_{type}]{_mx} + attr: [*sve-unstable] + doc: Maximum pairwise + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["{type_kind.fsu}maxp"] + compose: + - LLVMLink: { name: "{type_kind.fsu}maxp.{sve_type}" } + + - name: svmaxnmp[_{type}]{_mx} + attr: [*sve-unstable] + doc: Maximum number pairwise + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64] + assert_instr: ["fmaxnmp"] + compose: + - LLVMLink: { name: "fmaxnmp.{sve_type}" } + + - name: svminp[_{type}]{_mx} + attr: [*sve-unstable] + doc: Minimum pairwise + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["{type_kind.fsu}minp"] + compose: + - LLVMLink: { name: "{type_kind.fsu}minp.{sve_type}" } + + - name: svminnmp[_{type}]{_mx} + attr: [*sve-unstable] + doc: Minimum number pairwise + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [f32, f64] + assert_instr: ["fminnmp"] + compose: + - LLVMLink: { name: "fminnmp.{sve_type}" } + + - name: svmul_lane[_{type}] + attr: [*sve-unstable] + doc: Multiply + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type}" }] + assert_instr: [["{type_kind.f}mul", "IMM_INDEX = 0"]] + types: [f32, f64, i16, i32, i64, u16, u32, u64] + compose: + - LLVMLink: + name: "{type_kind.f}mul.lane.{sve_type}" + arguments: ["op1: {sve_type}", "op2: {sve_type}", "imm_index: i32"] + - FnCall: ["{llvm_link}", [$op1, $op2, IMM_INDEX]] + + - name: svqdmulh[{_n}_{type}] + attr: [*sve-unstable] + doc: Saturating doubling multiply high + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64] + assert_instr: [sqdmulh] + n_variant_op: op2 + compose: + - LLVMLink: { name: "sqdmulh.{sve_type}" } + + - name: svqdmulh_lane[_{type}] + attr: [*sve-unstable] + doc: Saturating doubling multiply high + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type}" }] + assert_instr: [["sqdmulh", "IMM_INDEX = 0"]] + types: [i16, i32, i64] + compose: + - LLVMLink: + name: "sqdmulh.lane.{sve_type}" + arguments: ["op1: {sve_type}", "op2: {sve_type}", "imm_index: i32"] + - FnCall: ["{llvm_link}", [$op1, $op2, IMM_INDEX]] + + - name: svqrdmulh[{_n}_{type}] + attr: [*sve-unstable] + doc: Saturating rounding doubling multiply high + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64] + assert_instr: [sqrdmulh] + n_variant_op: op2 + compose: + - LLVMLink: { name: "sqrdmulh.{sve_type}" } + + - name: svqrdmulh_lane[_{type}] + attr: [*sve-unstable] + doc: Saturating rounding doubling multiply high + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type}" }] + assert_instr: [["sqrdmulh", "IMM_INDEX = 0"]] + types: [i16, i32, i64] + compose: + - LLVMLink: + name: "sqrdmulh.lane.{sve_type}" + arguments: ["op1: {sve_type}", "op2: {sve_type}", "imm_index: i32"] + - FnCall: ["{llvm_link}", [$op1, $op2, IMM_INDEX]] + + - name: svqdmullb[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Saturating doubling multiply long (bottom) + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[i16, i8], [i32, i16], [i64, i32]] + assert_instr: [sqdmullb] + n_variant_op: op2 + compose: + - LLVMLink: { name: "sqdmullb.{sve_type[0]}" } + + - name: svqdmullb_lane[_{type[0]}] + attr: [*sve-unstable] + doc: Saturating doubling multiply long (bottom) + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[1]}" }] + assert_instr: [["sqdmullb", "IMM_INDEX = 0"]] + types: [[i32, i16], [i64, i32]] + compose: + - LLVMLink: + name: "sqdmullb.lane.{sve_type[0]}" + arguments: + ["op1: {sve_type[1]}", "op2: {sve_type[1]}", "imm_index: i32"] + - FnCall: ["{llvm_link}", [$op1, $op2, IMM_INDEX]] + + - name: svqdmullt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Saturating doubling multiply long (top) + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[i16, i8], [i32, i16], [i64, i32]] + assert_instr: [sqdmullt] + n_variant_op: op2 + compose: + - LLVMLink: { name: "sqdmullt.{sve_type[0]}" } + + - name: svqdmullt_lane[_{type[0]}] + attr: [*sve-unstable] + doc: Saturating doubling multiply long (top) + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[1]}" }] + assert_instr: [["sqdmullt", "IMM_INDEX = 0"]] + types: [[i32, i16], [i64, i32]] + compose: + - LLVMLink: + name: "sqdmullt.lane.{sve_type[0]}" + arguments: + ["op1: {sve_type[1]}", "op2: {sve_type[1]}", "imm_index: i32"] + - FnCall: ["{llvm_link}", [$op1, $op2, IMM_INDEX]] + + - name: svmullb[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Multiply long (bottom) + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}mullb"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind[0].su}mullb.{sve_type[0]}" } + + - name: svmullb_lane[_{type[0]}] + attr: [*sve-unstable] + doc: Multiply long (bottom) + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i32, i16] + - [i64, i32] + - [u32, u16] + - [u64, u32] + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[1]}" }] + assert_instr: [["{type_kind[0].su}mullb", "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "{type_kind[0].su}mullb.lane.{sve_type[0]}" + arguments: + ["op1: {sve_type[1]}", "op2: {sve_type[1]}", "imm_index: i32"] + - FnCall: ["{llvm_link}", [$op1, $op2, $IMM_INDEX]] + + - name: svmullt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Multiply long (top) + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}mullt"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind[0].su}mullt.{sve_type[0]}" } + + - name: svmullt_lane[_{type[0]}] + attr: [*sve-unstable] + doc: Multiply long (top) + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i32, i16] + - [i64, i32] + - [u32, u16] + - [u64, u32] + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[1]}" }] + assert_instr: [["{type_kind[0].su}mullt", "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "{type_kind[0].su}mullt.lane.{sve_type[0]}" + arguments: + ["op1: {sve_type[1]}", "op2: {sve_type[1]}", "imm_index: i32"] + - FnCall: ["{llvm_link}", [$op1, $op2, $IMM_INDEX]] + + - name: svrecpe[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Reciprocal estimate + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: [u32] + assert_instr: [urecpe] + zeroing_method: { drop: inactive } + compose: + - LLVMLink: { name: "urecpe.{sve_type}" } + + - name: svrsqrte[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Reciprocal square root estimate + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: [u32] + assert_instr: [ursqrte] + zeroing_method: { drop: inactive } + compose: + - LLVMLink: { name: "ursqrte.{sve_type}" } + + - name: svmla_lane[_{type}] + attr: [*sve-unstable] + doc: Multiply-add, addend first + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type}" }] + types: [i16, i32, i64, u16, u32, u64] + assert_instr: [[mla, "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "mla.lane.{sve_type}" + arguments: + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + - "IMM_INDEX: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]] + + - name: svmls_lane[_{type}] + attr: [*sve-unstable] + doc: Multiply-subtract, minuend first + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type}" }] + types: [i16, i32, i64, u16, u32, u64] + assert_instr: [[mls, "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "mls.lane.{sve_type}" + arguments: + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + - "IMM_INDEX: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]] + + - name: svmlalb[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Multiply-add long (bottom) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + [[i16, i8], [i32, i16], [i64, i32], [u16, u8], [u32, u16], [u64, u32]] + assert_instr: ["{type_kind[0].su}mlalb"] + n_variant_op: op3 + compose: + - LLVMLink: { name: "{type_kind[0].su}mlalb.{sve_type[0]}" } + + - name: svmlalb_lane[_{type[0]}] + attr: [*sve-unstable] + doc: Multiply-add long (bottom) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[1]}" }] + types: [[i32, i16], [i64, i32], [u32, u16], [u64, u32]] + assert_instr: [["{type_kind[0].su}mlalb", "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "{type_kind[0].su}mlalb.lane.{sve_type[0]}" + arguments: + - "op1: {sve_type[0]}" + - "op2: {sve_type[1]}" + - "op3: {sve_type[1]}" + - "IMM_INDEX: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]] + + - name: svmlalt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Multiply-add long (top) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + [[i16, i8], [i32, i16], [i64, i32], [u16, u8], [u32, u16], [u64, u32]] + assert_instr: ["{type_kind[0].su}mlalt"] + n_variant_op: op3 + compose: + - LLVMLink: { name: "{type_kind[0].su}mlalt.{sve_type[0]}" } + + - name: svmlalt_lane[_{type[0]}] + attr: [*sve-unstable] + doc: Multiply-add long (top) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[1]}" }] + types: [[i32, i16], [i64, i32], [u32, u16], [u64, u32]] + assert_instr: [["{type_kind[0].su}mlalt", "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "{type_kind[0].su}mlalt.lane.{sve_type[0]}" + arguments: + - "op1: {sve_type[0]}" + - "op2: {sve_type[1]}" + - "op3: {sve_type[1]}" + - "IMM_INDEX: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]] + + - name: svmlslb[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Multiply-subtract long (bottom) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + [[i16, i8], [i32, i16], [i64, i32], [u16, u8], [u32, u16], [u64, u32]] + assert_instr: ["{type_kind[0].su}mlslb"] + n_variant_op: op3 + compose: + - LLVMLink: { name: "{type_kind[0].su}mlslb.{sve_type[0]}" } + + - name: svmlslb_lane[_{type[0]}] + attr: [*sve-unstable] + doc: Multiply-subtract long (bottom) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[1]}" }] + types: [[i32, i16], [i64, i32], [u32, u16], [u64, u32]] + assert_instr: [["{type_kind[0].su}mlslb", "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "{type_kind[0].su}mlslb.lane.{sve_type[0]}" + arguments: + - "op1: {sve_type[0]}" + - "op2: {sve_type[1]}" + - "op3: {sve_type[1]}" + - "IMM_INDEX: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]] + + - name: svmlslt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Multiply-subtract long (top) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + [[i16, i8], [i32, i16], [i64, i32], [u16, u8], [u32, u16], [u64, u32]] + assert_instr: ["{type_kind[0].su}mlslt"] + n_variant_op: op3 + compose: + - LLVMLink: { name: "{type_kind[0].su}mlslt.{sve_type[0]}" } + + - name: svmlslt_lane[_{type[0]}] + attr: [*sve-unstable] + doc: Multiply-subtract long (top) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[1]}" }] + types: [[i32, i16], [i64, i32], [u32, u16], [u64, u32]] + assert_instr: [["{type_kind[0].su}mlslt", "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "{type_kind[0].su}mlslt.lane.{sve_type[0]}" + arguments: + - "op1: {sve_type[0]}" + - "op2: {sve_type[1]}" + - "op3: {sve_type[1]}" + - "IMM_INDEX: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]] + + - name: svqrdmlah[{_n}_{type}] + attr: [*sve-unstable] + doc: Saturating rounding doubling multiply-add high + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64] + assert_instr: [sqrdmlah] + n_variant_op: op3 + compose: + - LLVMLink: { name: "sqrdmlah.{sve_type}" } + + - name: svqrdmlah_lane[_{type}] + attr: [*sve-unstable] + doc: Saturating rounding doubling multiply-add high + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type}" }] + types: [i16, i32, i64] + assert_instr: [[sqrdmlah, "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "sqrdmlah.lane.{sve_type}" + arguments: + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + - "IMM_INDEX: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]] + + - name: svqrdmlsh[{_n}_{type}] + attr: [*sve-unstable] + doc: Saturating rounding doubling multiply-subtract high + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64] + assert_instr: [sqrdmlsh] + n_variant_op: op3 + compose: + - LLVMLink: { name: "sqrdmlsh.{sve_type}" } + + - name: svqrdmlsh_lane[_{type}] + attr: [*sve-unstable] + doc: Saturating rounding doubling multiply-subtract high + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type}" }] + types: [i16, i32, i64] + assert_instr: [[sqrdmlsh, "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "sqrdmlsh.lane.{sve_type}" + arguments: + - "op1: {sve_type}" + - "op2: {sve_type}" + - "op3: {sve_type}" + - "IMM_INDEX: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]] + + - name: svqdmlalb[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Saturating doubling multiply-add long (bottom) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[i16, i8], [i32, i16], [i64, i32]] + assert_instr: ["sqdmlalb"] + n_variant_op: op3 + compose: + - LLVMLink: { name: "sqdmlalb.{sve_type[0]}" } + + - name: svqdmlalb_lane[_{type[0]}] + attr: [*sve-unstable] + doc: Saturating doubling multiply-add long (bottom) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[1]}" }] + types: [[i32, i16], [i64, i32]] + assert_instr: [["sqdmlalb", "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "sqdmlalb.lane.{sve_type[0]}" + arguments: + - "op1: {sve_type[0]}" + - "op2: {sve_type[1]}" + - "op3: {sve_type[1]}" + - "IMM_INDEX: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]] + + - name: svqdmlalbt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Saturating doubling multiply-add long (bottom × top) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[i16, i8], [i32, i16], [i64, i32]] + assert_instr: ["sqdmlalbt"] + n_variant_op: op3 + compose: + - LLVMLink: { name: "sqdmlalbt.{sve_type[0]}" } + + - name: svqdmlalt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Saturating doubling multiply-add long (top) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[i16, i8], [i32, i16], [i64, i32]] + assert_instr: ["sqdmlalt"] + n_variant_op: op3 + compose: + - LLVMLink: { name: "sqdmlalt.{sve_type[0]}" } + + - name: svqdmlalt_lane[_{type[0]}] + attr: [*sve-unstable] + doc: Saturating doubling multiply-add long (top) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[1]}" }] + types: [[i32, i16], [i64, i32]] + assert_instr: [["sqdmlalt", "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "sqdmlalt.lane.{sve_type[0]}" + arguments: + - "op1: {sve_type[0]}" + - "op2: {sve_type[1]}" + - "op3: {sve_type[1]}" + - "IMM_INDEX: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]] + + - name: svqdmlslb[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Saturating doubling multiply-subtract long (bottom) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[i16, i8], [i32, i16], [i64, i32]] + assert_instr: ["sqdmlslb"] + n_variant_op: op3 + compose: + - LLVMLink: { name: "sqdmlslb.{sve_type[0]}" } + + - name: svqdmlslb_lane[_{type[0]}] + attr: [*sve-unstable] + doc: Saturating doubling multiply-subtract long (bottom) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[1]}" }] + types: [[i32, i16], [i64, i32]] + assert_instr: [["sqdmlslb", "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "sqdmlslb.lane.{sve_type[0]}" + arguments: + - "op1: {sve_type[0]}" + - "op2: {sve_type[1]}" + - "op3: {sve_type[1]}" + - "IMM_INDEX: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]] + + - name: svqdmlslbt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Saturating doubling multiply-subtract long (bottom × top) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[i16, i8], [i32, i16], [i64, i32]] + assert_instr: ["sqdmlslbt"] + n_variant_op: op3 + compose: + - LLVMLink: { name: "sqdmlslbt.{sve_type[0]}" } + + - name: svqdmlslt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Saturating doubling multiply-subtract long (top) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[i16, i8], [i32, i16], [i64, i32]] + assert_instr: ["sqdmlslt"] + n_variant_op: op3 + compose: + - LLVMLink: { name: "sqdmlslt.{sve_type[0]}" } + + - name: svqdmlslt_lane[_{type[0]}] + attr: [*sve-unstable] + doc: Saturating doubling multiply-subtract long (top) + arguments: + ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"] + return_type: "{sve_type[0]}" + static_defs: ["const IMM_INDEX: i32"] + constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[1]}" }] + types: [[i32, i16], [i64, i32]] + assert_instr: [["sqdmlslt", "IMM_INDEX = 0"]] + compose: + - LLVMLink: + name: "sqdmlslt.lane.{sve_type[0]}" + arguments: + - "op1: {sve_type[0]}" + - "op2: {sve_type[1]}" + - "op3: {sve_type[1]}" + - "IMM_INDEX: i32" + - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]] + + - name: svqneg[_{type}]{_mxz} + attr: [*sve-unstable] + doc: Saturating negate + arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64] + assert_instr: [sqneg] + zeroing_method: { drop: inactive } + compose: + - LLVMLink: { name: "sqneg.{sve_type}" } + + - name: svadclb[{_n}_{type}] + attr: [*sve-unstable] + doc: Add with carry long (bottom) + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + types: [u32, u64] + assert_instr: [adclb] + n_variant_op: op3 + compose: + - LLVMLink: { name: "adclb.{sve_type}" } + + - name: svadclt[{_n}_{type}] + attr: [*sve-unstable] + doc: Add with carry long (top) + arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"] + return_type: "{sve_type}" + types: [u32, u64] + assert_instr: [adclt] + n_variant_op: op3 + compose: + - LLVMLink: { name: "adclt.{sve_type}" } + + - name: svqadd[{_n}_{type}]{_mxz} + attr: [*sve-unstable] + doc: Saturating add + arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [i8, i16, i32, i64, u8, u16, u32, u64] + assert_instr: ["{type_kind.su}qadd"] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind.su}qadd.{sve_type}" } + + - name: svsqadd[{_n}_{type[0]}]{_mxz} + attr: [*sve-unstable] + doc: Saturating add with signed addend + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [u8, i8] + - [u16, i16] + - [u32, i32] + - [u64, i64] + assert_instr: [usqadd] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "usqadd.{sve_type[0]}" } + + - name: svuqadd[{_n}_{type[0]}]{_mxz} + attr: [*sve-unstable] + doc: Saturating add with unsigned addend + arguments: + ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i8, u8] + - [i16, u16] + - [i32, u32] + - [i64, u64] + assert_instr: [suqadd] + zeroing_method: { select: op1 } + n_variant_op: op2 + compose: + - LLVMLink: { name: "suqadd.{sve_type[0]}" } + + - name: svaddlb[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Add long (bottom) + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}addlb"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind[0].su}addlb.{sve_type[0]}" } + + - name: svaddlbt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Add long (bottom + top) + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + assert_instr: ["{type_kind[0].su}addlbt"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind[0].su}addlbt.{sve_type[0]}" } + + - name: svaddlt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Add long (top) + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}addlt"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind[0].su}addlt.{sve_type[0]}" } + + - name: svaddwb[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Add wide (bottom) + arguments: ["op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}addwb"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind[0].su}addwb.{sve_type[0]}" } + + - name: svaddwt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Add wide (top) + arguments: ["op1: {sve_type[0]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: + - [i16, i8] + - [i32, i16] + - [i64, i32] + - [u16, u8] + - [u32, u16] + - [u64, u32] + assert_instr: ["{type_kind[0].su}addwt"] + n_variant_op: op2 + compose: + - LLVMLink: { name: "{type_kind[0].su}addwt.{sve_type[0]}" } + + - name: svlogb[_{type[0]}]{_mxz} + attr: [*sve-unstable] + doc: Base 2 logarithm as integer + arguments: + ["inactive: {sve_type[1]}", "pg: {predicate[0]}", "op: {sve_type[0]}"] + return_type: "{sve_type[1]}" + types: [[f32, i32], [f64, i64]] + assert_instr: [flogb] + zeroing_method: { drop: inactive } + compose: + - LLVMLink: { name: "flogb.{sve_type[0]}" } + + - name: svpmul[{_n}_{type}] + attr: [*sve-unstable] + doc: Polynomial multiply + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [u8] + assert_instr: [pmul] + n_variant_op: op2 + compose: + - LLVMLink: { name: "pmul.{sve_type}" } + + - name: svpmullb_pair[{_n}_{type}] + attr: [*sve-unstable] + doc: Polynomial multiply long (bottom) + target_features: [sve2-aes] + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [u8, u32, u64] + assert_instr: [pmullb] + n_variant_op: op2 + compose: + - LLVMLink: { name: "pmullb.pair.{sve_type}" } + + - name: svpmullb[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Polynomial multiply long (bottom) + target_features: [sve2-aes] + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[u16, u8], [u64, u32]] + assert_instr: [pmullb] + n_variant_op: op2 + compose: + - FnCall: + - "crate::intrinsics::transmute_unchecked" + - [FnCall: ["svpmullb_pair_{type[1]}", [$op1, $op2]]] + - [] + - true + + - name: svpmullt_pair[{_n}_{type}] + attr: [*sve-unstable] + doc: Polynomial multiply long (top) + target_features: [sve2-aes] + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [u8, u32, u64] + assert_instr: [pmullt] + n_variant_op: op2 + compose: + - LLVMLink: { name: "pmullt.pair.{sve_type}" } + + - name: svpmullt[{_n}_{type[0]}] + attr: [*sve-unstable] + doc: Polynomial multiply long (top) + target_features: [sve2-aes] + arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] + return_type: "{sve_type[0]}" + types: [[u16, u8], [u64, u32]] + assert_instr: [pmullt] + n_variant_op: op2 + compose: + - FnCall: + - "crate::intrinsics::transmute_unchecked" + - [FnCall: ["svpmullt_pair_{type[1]}", [$op1, $op2]]] + - [] + - true + + - name: svaesd[_{type}] + attr: [*sve-unstable] + doc: AES single round decryption + target_features: [sve2-aes] + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [u8] + assert_instr: [aesd] + compose: + - LLVMLink: { name: "aesd" } + + - name: svaese[_{type}] + attr: [*sve-unstable] + doc: AES single round encryption + target_features: [sve2-aes] + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [u8] + assert_instr: [aese] + compose: + - LLVMLink: { name: "aese" } + + - name: svaesmc[_{type}] + attr: [*sve-unstable] + doc: AES mix columns + target_features: [sve2-aes] + arguments: ["op: {sve_type}"] + return_type: "{sve_type}" + types: [u8] + assert_instr: [aesmc] + compose: + - LLVMLink: { name: "aesmc" } + + - name: svaesimc[_{type}] + attr: [*sve-unstable] + doc: AES inverse mix columns + target_features: [sve2-aes] + arguments: ["op: {sve_type}"] + return_type: "{sve_type}" + types: [u8] + assert_instr: [aesimc] + compose: + - LLVMLink: { name: "aesimc" } + + - name: svsm4e[_{type}] + attr: [*sve-unstable] + doc: SM4 encryption and decryption + target_features: [sve2-sm4] + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [u32] + assert_instr: [sm4e] + compose: + - LLVMLink: { name: "sm4e" } + + - name: svsm4ekey[_{type}] + attr: [*sve-unstable] + doc: SM4 key updates + target_features: [sve2-sm4] + arguments: ["op1: {sve_type}", "op2: {sve_type}"] + return_type: "{sve_type}" + types: [u32] + assert_instr: [sm4ekey] + compose: + - LLVMLink: { name: "sm4ekey" } From a753cf4d77ebb6c39e984200087b1976c1d80c38 Mon Sep 17 00:00:00 2001 From: David Wood Date: Thu, 15 Jan 2026 16:29:25 +0000 Subject: [PATCH 11/20] core_arch: generated sve intrinsics Following from previous commit, this commit only contains generated code from the SVE intrinsic specifications Co-authored-by: Jamie Cunliffe Co-authored-by: Luca Vizzarro Co-authored-by: Adam Gemmell Co-authored-by: Jacob Bramley --- .../core_arch/src/aarch64/sve/generated.rs | 44957 ++++++++++++++++ .../src/aarch64/sve/ld_st_tests_aarch64.rs | 9345 ++++ .../core_arch/src/aarch64/sve2/generated.rs | 23856 ++++++++ .../src/aarch64/sve2/ld_st_tests_aarch64.rs | 2482 + 4 files changed, 80640 insertions(+) create mode 100644 library/stdarch/crates/core_arch/src/aarch64/sve/ld_st_tests_aarch64.rs create mode 100644 library/stdarch/crates/core_arch/src/aarch64/sve2/ld_st_tests_aarch64.rs diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve/generated.rs b/library/stdarch/crates/core_arch/src/aarch64/sve/generated.rs index 8b137891791f..6edfc8e159a7 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/sve/generated.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/sve/generated.rs @@ -1 +1,44958 @@ +// This code is automatically generated. DO NOT MODIFY. +// +// Instead, modify `crates/stdarch-gen-arm/spec/` and run the following command to re-generate this file: +// +// ``` +// cargo run --bin=stdarch-gen-arm -- crates/stdarch-gen-arm/spec +// ``` +#![allow(improper_ctypes)] +#[cfg(test)] +use stdarch_test::assert_instr; + +use super::*; +use crate::core_arch::arch::aarch64::*; + +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fabd.nxv4f32")] + fn _svabd_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svabd_f32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svabd_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svabd_f32_m(pg, op1, op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svabd_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svabd_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svabd_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fabd.nxv2f64")] + fn _svabd_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svabd_f64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svabd_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svabd_f64_m(pg, op1, op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svabd_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svabd_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fabd))] +pub fn svabd_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svabd_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabd.nxv16i8")] + fn _svabd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svabd_s8_m(pg, op1, op2) } +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svabd_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svabd_s8_m(pg, op1, op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svabd_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svabd_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svabd_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabd.nxv8i16")] + fn _svabd_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svabd_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svabd_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svabd_s16_m(pg, op1, op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svabd_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svabd_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svabd_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabd.nxv4i32")] + fn _svabd_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svabd_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svabd_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svabd_s32_m(pg, op1, op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svabd_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svabd_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svabd_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabd.nxv2i64")] + fn _svabd_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svabd_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svabd_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svabd_s64_m(pg, op1, op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svabd_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svabd_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabd))] +pub fn svabd_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svabd_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabd.nxv16i8")] + fn _svabd_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svabd_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svabd_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svabd_u8_m(pg, op1, op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svabd_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svabd_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svabd_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabd.nxv8i16")] + fn _svabd_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svabd_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svabd_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svabd_u16_m(pg, op1, op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svabd_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svabd_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svabd_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabd.nxv4i32")] + fn _svabd_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svabd_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svabd_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svabd_u32_m(pg, op1, op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svabd_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svabd_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svabd_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabd.nxv2i64")] + fn _svabd_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svabd_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svabd_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svabd_u64_m(pg, op1, op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svabd_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svabd_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Absolute difference"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabd))] +pub fn svabd_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svabd_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fabs))] +pub fn svabs_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fabs.nxv4f32")] + fn _svabs_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svabs_f32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fabs))] +pub fn svabs_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svabs_f32_m(op, pg, op) +} +#[doc = "Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fabs))] +pub fn svabs_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svabs_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fabs))] +pub fn svabs_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fabs.nxv2f64")] + fn _svabs_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svabs_f64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fabs))] +pub fn svabs_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svabs_f64_m(op, pg, op) +} +#[doc = "Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fabs))] +pub fn svabs_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svabs_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.abs.nxv16i8")] + fn _svabs_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t; + } + unsafe { _svabs_s8_m(inactive, pg, op) } +} +#[doc = "Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t { + svabs_s8_m(op, pg, op) +} +#[doc = "Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t { + svabs_s8_m(svdup_n_s8(0), pg, op) +} +#[doc = "Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.abs.nxv8i16")] + fn _svabs_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svabs_s16_m(inactive, pg.sve_into(), op) } +} +#[doc = "Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t { + svabs_s16_m(op, pg, op) +} +#[doc = "Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t { + svabs_s16_m(svdup_n_s16(0), pg, op) +} +#[doc = "Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.abs.nxv4i32")] + fn _svabs_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svabs_s32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { + svabs_s32_m(op, pg, op) +} +#[doc = "Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { + svabs_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.abs.nxv2i64")] + fn _svabs_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svabs_s64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svabs_s64_m(op, pg, op) +} +#[doc = "Absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(abs))] +pub fn svabs_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svabs_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Absolute compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacge[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(facge))] +pub fn svacge_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.facge.nxv4f32")] + fn _svacge_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t; + } + unsafe { _svacge_f32(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Absolute compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacge[_n_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(facge))] +pub fn svacge_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svacge_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Absolute compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacge[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(facge))] +pub fn svacge_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.facge.nxv2f64")] + fn _svacge_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t; + } + unsafe { _svacge_f64(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Absolute compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacge[_n_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(facge))] +pub fn svacge_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svacge_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Absolute compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacgt[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(facgt))] +pub fn svacgt_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.facgt.nxv4f32")] + fn _svacgt_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t; + } + unsafe { _svacgt_f32(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Absolute compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacgt[_n_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(facgt))] +pub fn svacgt_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svacgt_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Absolute compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacgt[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(facgt))] +pub fn svacgt_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.facgt.nxv2f64")] + fn _svacgt_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t; + } + unsafe { _svacgt_f64(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Absolute compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacgt[_n_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(facgt))] +pub fn svacgt_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svacgt_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Absolute compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacle[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(facge))] +pub fn svacle_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + svacge_f32(pg, op2, op1) +} +#[doc = "Absolute compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacle[_n_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(facge))] +pub fn svacle_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svacle_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Absolute compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacle[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(facge))] +pub fn svacle_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + svacge_f64(pg, op2, op1) +} +#[doc = "Absolute compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacle[_n_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(facge))] +pub fn svacle_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svacle_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Absolute compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaclt[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(facgt))] +pub fn svaclt_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + svacgt_f32(pg, op2, op1) +} +#[doc = "Absolute compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaclt[_n_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(facgt))] +pub fn svaclt_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svaclt_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Absolute compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaclt[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(facgt))] +pub fn svaclt_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + svacgt_f64(pg, op2, op1) +} +#[doc = "Absolute compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaclt[_n_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(facgt))] +pub fn svaclt_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svaclt_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fadd.nxv4f32")] + fn _svadd_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svadd_f32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svadd_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svadd_f32_m(pg, op1, op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svadd_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svadd_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svadd_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fadd.nxv2f64")] + fn _svadd_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svadd_f64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svadd_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svadd_f64_m(pg, op1, op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svadd_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svadd_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fadd))] +pub fn svadd_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svadd_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.add.nxv16i8")] + fn _svadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svadd_s8_m(pg, op1, op2) } +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svadd_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svadd_s8_m(pg, op1, op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svadd_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svadd_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svadd_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.add.nxv8i16")] + fn _svadd_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svadd_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svadd_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svadd_s16_m(pg, op1, op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svadd_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svadd_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svadd_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.add.nxv4i32")] + fn _svadd_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svadd_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svadd_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svadd_s32_m(pg, op1, op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svadd_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svadd_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svadd_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.add.nxv2i64")] + fn _svadd_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svadd_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svadd_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svadd_s64_m(pg, op1, op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svadd_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svadd_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svadd_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svadd_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svadd_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svadd_u8_m(pg, op1, op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svadd_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svadd_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svadd_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svadd_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svadd_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svadd_u16_m(pg, op1, op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svadd_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svadd_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svadd_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svadd_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svadd_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svadd_u32_m(pg, op1, op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svadd_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svadd_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svadd_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svadd_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svadd_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svadd_u64_m(pg, op1, op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svadd_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svadd_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(add))] +pub fn svadd_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svadd_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Add reduction (strictly-ordered)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadda[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fadda))] +pub fn svadda_f32(pg: svbool_t, initial: f32, op: svfloat32_t) -> f32 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fadda.nxv4f32")] + fn _svadda_f32(pg: svbool4_t, initial: f32, op: svfloat32_t) -> f32; + } + unsafe { _svadda_f32(pg.sve_into(), initial, op) } +} +#[doc = "Add reduction (strictly-ordered)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadda[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fadda))] +pub fn svadda_f64(pg: svbool_t, initial: f64, op: svfloat64_t) -> f64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fadda.nxv2f64")] + fn _svadda_f64(pg: svbool2_t, initial: f64, op: svfloat64_t) -> f64; + } + unsafe { _svadda_f64(pg.sve_into(), initial, op) } +} +#[doc = "Add reduction"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(faddv))] +pub fn svaddv_f32(pg: svbool_t, op: svfloat32_t) -> f32 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.faddv.nxv4f32")] + fn _svaddv_f32(pg: svbool4_t, op: svfloat32_t) -> f32; + } + unsafe { _svaddv_f32(pg.sve_into(), op) } +} +#[doc = "Add reduction"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(faddv))] +pub fn svaddv_f64(pg: svbool_t, op: svfloat64_t) -> f64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.faddv.nxv2f64")] + fn _svaddv_f64(pg: svbool2_t, op: svfloat64_t) -> f64; + } + unsafe { _svaddv_f64(pg.sve_into(), op) } +} +#[doc = "Add reduction"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddv))] +pub fn svaddv_s64(pg: svbool_t, op: svint64_t) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddv.nxv2i64")] + fn _svaddv_s64(pg: svbool2_t, op: svint64_t) -> i64; + } + unsafe { _svaddv_s64(pg.sve_into(), op) } +} +#[doc = "Add reduction"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddv))] +pub fn svaddv_u64(pg: svbool_t, op: svuint64_t) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddv.nxv2i64")] + fn _svaddv_u64(pg: svbool2_t, op: svint64_t) -> i64; + } + unsafe { _svaddv_u64(pg.sve_into(), op.as_signed()).as_unsigned() } +} +#[doc = "Add reduction"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddv))] +pub fn svaddv_s8(pg: svbool_t, op: svint8_t) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddv.nxv16i8")] + fn _svaddv_s8(pg: svbool_t, op: svint8_t) -> i64; + } + unsafe { _svaddv_s8(pg, op) } +} +#[doc = "Add reduction"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddv))] +pub fn svaddv_s16(pg: svbool_t, op: svint16_t) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddv.nxv8i16")] + fn _svaddv_s16(pg: svbool8_t, op: svint16_t) -> i64; + } + unsafe { _svaddv_s16(pg.sve_into(), op) } +} +#[doc = "Add reduction"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddv))] +pub fn svaddv_s32(pg: svbool_t, op: svint32_t) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddv.nxv4i32")] + fn _svaddv_s32(pg: svbool4_t, op: svint32_t) -> i64; + } + unsafe { _svaddv_s32(pg.sve_into(), op) } +} +#[doc = "Add reduction"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddv))] +pub fn svaddv_u8(pg: svbool_t, op: svuint8_t) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddv.nxv16i8")] + fn _svaddv_u8(pg: svbool_t, op: svint8_t) -> i64; + } + unsafe { _svaddv_u8(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Add reduction"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddv))] +pub fn svaddv_u16(pg: svbool_t, op: svuint16_t) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddv.nxv8i16")] + fn _svaddv_u16(pg: svbool8_t, op: svint16_t) -> i64; + } + unsafe { _svaddv_u16(pg.sve_into(), op.as_signed()).as_unsigned() } +} +#[doc = "Add reduction"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddv))] +pub fn svaddv_u32(pg: svbool_t, op: svuint32_t) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddv.nxv4i32")] + fn _svaddv_u32(pg: svbool4_t, op: svint32_t) -> i64; + } + unsafe { _svaddv_u32(pg.sve_into(), op.as_signed()).as_unsigned() } +} +#[doc = "Compute vector addresses for 8-bit data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrb[_u32base]_[s32]offset)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrb_u32base_s32offset(bases: svuint32_t, offsets: svint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adrb.nxv4i32")] + fn _svadrb_u32base_s32offset(bases: svint32_t, offsets: svint32_t) -> svint32_t; + } + unsafe { _svadrb_u32base_s32offset(bases.as_signed(), offsets).as_unsigned() } +} +#[doc = "Compute vector addresses for 16-bit data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrh[_u32base]_[s32]index)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrh_u32base_s32index(bases: svuint32_t, indices: svint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adrh.nxv4i32")] + fn _svadrh_u32base_s32index(bases: svint32_t, indices: svint32_t) -> svint32_t; + } + unsafe { _svadrh_u32base_s32index(bases.as_signed(), indices).as_unsigned() } +} +#[doc = "Compute vector addresses for 32-bit data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrw[_u32base]_[s32]index)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrw_u32base_s32index(bases: svuint32_t, indices: svint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adrw.nxv4i32")] + fn _svadrw_u32base_s32index(bases: svint32_t, indices: svint32_t) -> svint32_t; + } + unsafe { _svadrw_u32base_s32index(bases.as_signed(), indices).as_unsigned() } +} +#[doc = "Compute vector addresses for 64-bit data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrd[_u32base]_[s32]index)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrd_u32base_s32index(bases: svuint32_t, indices: svint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adrd.nxv4i32")] + fn _svadrd_u32base_s32index(bases: svint32_t, indices: svint32_t) -> svint32_t; + } + unsafe { _svadrd_u32base_s32index(bases.as_signed(), indices).as_unsigned() } +} +#[doc = "Compute vector addresses for 8-bit data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrb[_u32base]_[u32]offset)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrb_u32base_u32offset(bases: svuint32_t, offsets: svuint32_t) -> svuint32_t { + unsafe { svadrb_u32base_s32offset(bases, offsets.as_signed()) } +} +#[doc = "Compute vector addresses for 16-bit data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrh[_u32base]_[u32]index)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrh_u32base_u32index(bases: svuint32_t, indices: svuint32_t) -> svuint32_t { + unsafe { svadrh_u32base_s32index(bases, indices.as_signed()) } +} +#[doc = "Compute vector addresses for 32-bit data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrw[_u32base]_[u32]index)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrw_u32base_u32index(bases: svuint32_t, indices: svuint32_t) -> svuint32_t { + unsafe { svadrw_u32base_s32index(bases, indices.as_signed()) } +} +#[doc = "Compute vector addresses for 64-bit data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrd[_u32base]_[u32]index)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrd_u32base_u32index(bases: svuint32_t, indices: svuint32_t) -> svuint32_t { + unsafe { svadrd_u32base_s32index(bases, indices.as_signed()) } +} +#[doc = "Compute vector addresses for 8-bit data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrb[_u64base]_[s64]offset)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrb_u64base_s64offset(bases: svuint64_t, offsets: svint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adrb.nxv2i64")] + fn _svadrb_u64base_s64offset(bases: svint64_t, offsets: svint64_t) -> svint64_t; + } + unsafe { _svadrb_u64base_s64offset(bases.as_signed(), offsets).as_unsigned() } +} +#[doc = "Compute vector addresses for 16-bit data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrh[_u64base]_[s64]index)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrh_u64base_s64index(bases: svuint64_t, indices: svint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adrh.nxv2i64")] + fn _svadrh_u64base_s64index(bases: svint64_t, indices: svint64_t) -> svint64_t; + } + unsafe { _svadrh_u64base_s64index(bases.as_signed(), indices).as_unsigned() } +} +#[doc = "Compute vector addresses for 32-bit data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrw[_u64base]_[s64]index)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrw_u64base_s64index(bases: svuint64_t, indices: svint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adrw.nxv2i64")] + fn _svadrw_u64base_s64index(bases: svint64_t, indices: svint64_t) -> svint64_t; + } + unsafe { _svadrw_u64base_s64index(bases.as_signed(), indices).as_unsigned() } +} +#[doc = "Compute vector addresses for 64-bit data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrd[_u64base]_[s64]index)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrd_u64base_s64index(bases: svuint64_t, indices: svint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adrd.nxv2i64")] + fn _svadrd_u64base_s64index(bases: svint64_t, indices: svint64_t) -> svint64_t; + } + unsafe { _svadrd_u64base_s64index(bases.as_signed(), indices).as_unsigned() } +} +#[doc = "Compute vector addresses for 8-bit data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrb[_u64base]_[u64]offset)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrb_u64base_u64offset(bases: svuint64_t, offsets: svuint64_t) -> svuint64_t { + unsafe { svadrb_u64base_s64offset(bases, offsets.as_signed()) } +} +#[doc = "Compute vector addresses for 16-bit data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrh[_u64base]_[u64]index)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrh_u64base_u64index(bases: svuint64_t, indices: svuint64_t) -> svuint64_t { + unsafe { svadrh_u64base_s64index(bases, indices.as_signed()) } +} +#[doc = "Compute vector addresses for 32-bit data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrw[_u64base]_[u64]index)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrw_u64base_u64index(bases: svuint64_t, indices: svuint64_t) -> svuint64_t { + unsafe { svadrw_u64base_s64index(bases, indices.as_signed()) } +} +#[doc = "Compute vector addresses for 64-bit data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrd[_u64base]_[u64]index)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adr))] +pub fn svadrd_u64base_u64index(bases: svuint64_t, indices: svuint64_t) -> svuint64_t { + unsafe { svadrd_u64base_s64index(bases, indices.as_signed()) } +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_b]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.and.z.nvx16i1")] + fn _svand_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svand_b_z(pg, op1, op2) } +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.and.nxv16i8")] + fn _svand_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svand_s8_m(pg, op1, op2) } +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svand_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svand_s8_m(pg, op1, op2) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svand_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svand_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svand_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.and.nxv8i16")] + fn _svand_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svand_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svand_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svand_s16_m(pg, op1, op2) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svand_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svand_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svand_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.and.nxv4i32")] + fn _svand_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svand_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svand_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svand_s32_m(pg, op1, op2) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svand_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svand_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svand_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.and.nxv2i64")] + fn _svand_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svand_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svand_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svand_s64_m(pg, op1, op2) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svand_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svand_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svand_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svand_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svand_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svand_u8_m(pg, op1, op2) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svand_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svand_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svand_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svand_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svand_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svand_u16_m(pg, op1, op2) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svand_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svand_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svand_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svand_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svand_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svand_u32_m(pg, op1, op2) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svand_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svand_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svand_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svand_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svand_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svand_u64_m(pg, op1, op2) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svand_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svand_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Bitwise AND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(and))] +pub fn svand_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svand_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise AND reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(andv))] +pub fn svandv_s8(pg: svbool_t, op: svint8_t) -> i8 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.andv.nxv16i8")] + fn _svandv_s8(pg: svbool_t, op: svint8_t) -> i8; + } + unsafe { _svandv_s8(pg, op) } +} +#[doc = "Bitwise AND reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(andv))] +pub fn svandv_s16(pg: svbool_t, op: svint16_t) -> i16 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.andv.nxv8i16")] + fn _svandv_s16(pg: svbool8_t, op: svint16_t) -> i16; + } + unsafe { _svandv_s16(pg.sve_into(), op) } +} +#[doc = "Bitwise AND reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(andv))] +pub fn svandv_s32(pg: svbool_t, op: svint32_t) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.andv.nxv4i32")] + fn _svandv_s32(pg: svbool4_t, op: svint32_t) -> i32; + } + unsafe { _svandv_s32(pg.sve_into(), op) } +} +#[doc = "Bitwise AND reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(andv))] +pub fn svandv_s64(pg: svbool_t, op: svint64_t) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.andv.nxv2i64")] + fn _svandv_s64(pg: svbool2_t, op: svint64_t) -> i64; + } + unsafe { _svandv_s64(pg.sve_into(), op) } +} +#[doc = "Bitwise AND reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(andv))] +pub fn svandv_u8(pg: svbool_t, op: svuint8_t) -> u8 { + unsafe { svandv_s8(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise AND reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(andv))] +pub fn svandv_u16(pg: svbool_t, op: svuint16_t) -> u16 { + unsafe { svandv_s16(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise AND reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(andv))] +pub fn svandv_u32(pg: svbool_t, op: svuint32_t) -> u32 { + unsafe { svandv_s32(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise AND reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(andv))] +pub fn svandv_u64(pg: svbool_t, op: svuint64_t) -> u64 { + unsafe { svandv_s64(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s8_m(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.asr.nxv16i8")] + fn _svasr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svasr_s8_m(pg, op1, op2.as_signed()) } +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s8_m(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t { + svasr_s8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s8_x(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t { + svasr_s8_m(pg, op1, op2) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s8_x(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t { + svasr_s8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s8_z(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t { + svasr_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s8_z(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t { + svasr_s8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s16_m(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.asr.nxv8i16")] + fn _svasr_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svasr_s16_m(pg.sve_into(), op1, op2.as_signed()) } +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s16_m(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t { + svasr_s16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s16_x(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t { + svasr_s16_m(pg, op1, op2) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s16_x(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t { + svasr_s16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s16_z(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t { + svasr_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s16_z(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t { + svasr_s16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s32_m(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.asr.nxv4i32")] + fn _svasr_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svasr_s32_m(pg.sve_into(), op1, op2.as_signed()) } +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t { + svasr_s32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s32_x(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t { + svasr_s32_m(pg, op1, op2) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t { + svasr_s32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s32_z(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t { + svasr_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t { + svasr_s32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s64_m(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.asr.nxv2i64")] + fn _svasr_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svasr_s64_m(pg.sve_into(), op1, op2.as_signed()) } +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s64_m(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t { + svasr_s64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s64_x(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t { + svasr_s64_m(pg, op1, op2) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t { + svasr_s64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_s64_z(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t { + svasr_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_n_s64_z(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t { + svasr_s64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_s8_m(pg: svbool_t, op1: svint8_t, op2: svuint64_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.asr.wide.nxv16i8" + )] + fn _svasr_wide_s8_m(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svint8_t; + } + unsafe { _svasr_wide_s8_m(pg, op1, op2.as_signed()) } +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_n_s8_m(pg: svbool_t, op1: svint8_t, op2: u64) -> svint8_t { + svasr_wide_s8_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_s8_x(pg: svbool_t, op1: svint8_t, op2: svuint64_t) -> svint8_t { + svasr_wide_s8_m(pg, op1, op2) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_n_s8_x(pg: svbool_t, op1: svint8_t, op2: u64) -> svint8_t { + svasr_wide_s8_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_s8_z(pg: svbool_t, op1: svint8_t, op2: svuint64_t) -> svint8_t { + svasr_wide_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_n_s8_z(pg: svbool_t, op1: svint8_t, op2: u64) -> svint8_t { + svasr_wide_s8_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_s16_m(pg: svbool_t, op1: svint16_t, op2: svuint64_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.asr.wide.nxv8i16" + )] + fn _svasr_wide_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svint16_t; + } + unsafe { _svasr_wide_s16_m(pg.sve_into(), op1, op2.as_signed()) } +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_n_s16_m(pg: svbool_t, op1: svint16_t, op2: u64) -> svint16_t { + svasr_wide_s16_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_s16_x(pg: svbool_t, op1: svint16_t, op2: svuint64_t) -> svint16_t { + svasr_wide_s16_m(pg, op1, op2) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_n_s16_x(pg: svbool_t, op1: svint16_t, op2: u64) -> svint16_t { + svasr_wide_s16_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_s16_z(pg: svbool_t, op1: svint16_t, op2: svuint64_t) -> svint16_t { + svasr_wide_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_n_s16_z(pg: svbool_t, op1: svint16_t, op2: u64) -> svint16_t { + svasr_wide_s16_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_s32_m(pg: svbool_t, op1: svint32_t, op2: svuint64_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.asr.wide.nxv4i32" + )] + fn _svasr_wide_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svint32_t; + } + unsafe { _svasr_wide_s32_m(pg.sve_into(), op1, op2.as_signed()) } +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_n_s32_m(pg: svbool_t, op1: svint32_t, op2: u64) -> svint32_t { + svasr_wide_s32_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_s32_x(pg: svbool_t, op1: svint32_t, op2: svuint64_t) -> svint32_t { + svasr_wide_s32_m(pg, op1, op2) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_n_s32_x(pg: svbool_t, op1: svint32_t, op2: u64) -> svint32_t { + svasr_wide_s32_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_s32_z(pg: svbool_t, op1: svint32_t, op2: svuint64_t) -> svint32_t { + svasr_wide_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Arithmetic shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asr))] +pub fn svasr_wide_n_s32_z(pg: svbool_t, op1: svint32_t, op2: u64) -> svint32_t { + svasr_wide_s32_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Arithmetic shift right for divide by immediate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] +pub fn svasrd_n_s8_m(pg: svbool_t, op1: svint8_t) -> svint8_t { + static_assert_range!(IMM2, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.asrd.nxv16i8")] + fn _svasrd_n_s8_m(pg: svbool_t, op1: svint8_t, imm2: i32) -> svint8_t; + } + unsafe { _svasrd_n_s8_m(pg, op1, IMM2) } +} +#[doc = "Arithmetic shift right for divide by immediate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] +pub fn svasrd_n_s8_x(pg: svbool_t, op1: svint8_t) -> svint8_t { + svasrd_n_s8_m::(pg, op1) +} +#[doc = "Arithmetic shift right for divide by immediate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] +pub fn svasrd_n_s8_z(pg: svbool_t, op1: svint8_t) -> svint8_t { + svasrd_n_s8_m::(pg, svsel_s8(pg, op1, svdup_n_s8(0))) +} +#[doc = "Arithmetic shift right for divide by immediate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] +pub fn svasrd_n_s16_m(pg: svbool_t, op1: svint16_t) -> svint16_t { + static_assert_range!(IMM2, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.asrd.nxv8i16")] + fn _svasrd_n_s16_m(pg: svbool8_t, op1: svint16_t, imm2: i32) -> svint16_t; + } + unsafe { _svasrd_n_s16_m(pg.sve_into(), op1, IMM2) } +} +#[doc = "Arithmetic shift right for divide by immediate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] +pub fn svasrd_n_s16_x(pg: svbool_t, op1: svint16_t) -> svint16_t { + svasrd_n_s16_m::(pg, op1) +} +#[doc = "Arithmetic shift right for divide by immediate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] +pub fn svasrd_n_s16_z(pg: svbool_t, op1: svint16_t) -> svint16_t { + svasrd_n_s16_m::(pg, svsel_s16(pg, op1, svdup_n_s16(0))) +} +#[doc = "Arithmetic shift right for divide by immediate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] +pub fn svasrd_n_s32_m(pg: svbool_t, op1: svint32_t) -> svint32_t { + static_assert_range!(IMM2, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.asrd.nxv4i32")] + fn _svasrd_n_s32_m(pg: svbool4_t, op1: svint32_t, imm2: i32) -> svint32_t; + } + unsafe { _svasrd_n_s32_m(pg.sve_into(), op1, IMM2) } +} +#[doc = "Arithmetic shift right for divide by immediate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] +pub fn svasrd_n_s32_x(pg: svbool_t, op1: svint32_t) -> svint32_t { + svasrd_n_s32_m::(pg, op1) +} +#[doc = "Arithmetic shift right for divide by immediate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] +pub fn svasrd_n_s32_z(pg: svbool_t, op1: svint32_t) -> svint32_t { + svasrd_n_s32_m::(pg, svsel_s32(pg, op1, svdup_n_s32(0))) +} +#[doc = "Arithmetic shift right for divide by immediate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] +pub fn svasrd_n_s64_m(pg: svbool_t, op1: svint64_t) -> svint64_t { + static_assert_range!(IMM2, 1..=64); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.asrd.nxv2i64")] + fn _svasrd_n_s64_m(pg: svbool2_t, op1: svint64_t, imm2: i32) -> svint64_t; + } + unsafe { _svasrd_n_s64_m(pg.sve_into(), op1, IMM2) } +} +#[doc = "Arithmetic shift right for divide by immediate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] +pub fn svasrd_n_s64_x(pg: svbool_t, op1: svint64_t) -> svint64_t { + svasrd_n_s64_m::(pg, op1) +} +#[doc = "Arithmetic shift right for divide by immediate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] +pub fn svasrd_n_s64_z(pg: svbool_t, op1: svint64_t) -> svint64_t { + svasrd_n_s64_m::(pg, svsel_s64(pg, op1, svdup_n_s64(0))) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_b]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bic.z.nvx16i1")] + fn _svbic_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svbic_b_z(pg, op1, op2) } +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bic.nxv16i8")] + fn _svbic_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svbic_s8_m(pg, op1, op2) } +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svbic_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svbic_s8_m(pg, op1, op2) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svbic_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svbic_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svbic_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bic.nxv8i16")] + fn _svbic_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svbic_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svbic_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svbic_s16_m(pg, op1, op2) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svbic_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svbic_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svbic_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bic.nxv4i32")] + fn _svbic_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svbic_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svbic_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svbic_s32_m(pg, op1, op2) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svbic_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svbic_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svbic_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bic.nxv2i64")] + fn _svbic_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svbic_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svbic_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svbic_s64_m(pg, op1, op2) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svbic_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svbic_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svbic_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svbic_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svbic_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svbic_u8_m(pg, op1, op2) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svbic_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svbic_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svbic_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svbic_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svbic_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svbic_u16_m(pg, op1, op2) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svbic_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svbic_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svbic_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svbic_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svbic_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svbic_u32_m(pg, op1, op2) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svbic_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svbic_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svbic_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svbic_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svbic_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svbic_u64_m(pg, op1, op2) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svbic_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svbic_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Bitwise clear"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bic))] +pub fn svbic_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svbic_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Break after first true condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrka[_b]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(brka))] +pub fn svbrka_b_m(inactive: svbool_t, pg: svbool_t, op: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.brka.nxv16i1")] + fn _svbrka_b_m(inactive: svbool_t, pg: svbool_t, op: svbool_t) -> svbool_t; + } + unsafe { _svbrka_b_m(inactive, pg, op) } +} +#[doc = "Break after first true condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrka[_b]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(brka))] +pub fn svbrka_b_z(pg: svbool_t, op: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.brka.z.nxv16i1")] + fn _svbrka_b_z(pg: svbool_t, op: svbool_t) -> svbool_t; + } + unsafe { _svbrka_b_z(pg, op) } +} +#[doc = "Break before first true condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrkb[_b]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(brkb))] +pub fn svbrkb_b_m(inactive: svbool_t, pg: svbool_t, op: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.brkb.nxv16i1")] + fn _svbrkb_b_m(inactive: svbool_t, pg: svbool_t, op: svbool_t) -> svbool_t; + } + unsafe { _svbrkb_b_m(inactive, pg, op) } +} +#[doc = "Break before first true condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrkb[_b]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(brkb))] +pub fn svbrkb_b_z(pg: svbool_t, op: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.brkb.z.nxv16i1")] + fn _svbrkb_b_z(pg: svbool_t, op: svbool_t) -> svbool_t; + } + unsafe { _svbrkb_b_z(pg, op) } +} +#[doc = "Propagate break to next partition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrkn[_b]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(brkn))] +pub fn svbrkn_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.brkn.z.nxv16i1")] + fn _svbrkn_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svbrkn_b_z(pg, op1, op2) } +} +#[doc = "Break after first true condition, propagating from previous partition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrkpa[_b]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(brkpa))] +pub fn svbrkpa_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.brkpa.z.nxv16i1" + )] + fn _svbrkpa_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svbrkpa_b_z(pg, op1, op2) } +} +#[doc = "Break before first true condition, propagating from previous partition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrkpb[_b]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(brkpb))] +pub fn svbrkpb_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.brkpb.z.nxv16i1" + )] + fn _svbrkpb_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svbrkpb_b_z(pg, op1, op2) } +} +#[doc = "Complex add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))] +pub fn svcadd_f32_m( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, +) -> svfloat32_t { + static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcadd.nxv4f32")] + fn _svcadd_f32_m( + pg: svbool4_t, + op1: svfloat32_t, + op2: svfloat32_t, + imm_rotation: i32, + ) -> svfloat32_t; + } + unsafe { _svcadd_f32_m(pg.sve_into(), op1, op2, IMM_ROTATION) } +} +#[doc = "Complex add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))] +pub fn svcadd_f32_x( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, +) -> svfloat32_t { + svcadd_f32_m::(pg, op1, op2) +} +#[doc = "Complex add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))] +pub fn svcadd_f32_z( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, +) -> svfloat32_t { + svcadd_f32_m::(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Complex add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))] +pub fn svcadd_f64_m( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, +) -> svfloat64_t { + static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcadd.nxv2f64")] + fn _svcadd_f64_m( + pg: svbool2_t, + op1: svfloat64_t, + op2: svfloat64_t, + imm_rotation: i32, + ) -> svfloat64_t; + } + unsafe { _svcadd_f64_m(pg.sve_into(), op1, op2, IMM_ROTATION) } +} +#[doc = "Complex add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))] +pub fn svcadd_f64_x( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, +) -> svfloat64_t { + svcadd_f64_m::(pg, op1, op2) +} +#[doc = "Complex add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))] +pub fn svcadd_f64_z( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, +) -> svfloat64_t { + svcadd_f64_m::(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Conditionally extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_f32(pg: svbool_t, fallback: svfloat32_t, data: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clasta.nxv4f32")] + fn _svclasta_f32(pg: svbool4_t, fallback: svfloat32_t, data: svfloat32_t) -> svfloat32_t; + } + unsafe { _svclasta_f32(pg.sve_into(), fallback, data) } +} +#[doc = "Conditionally extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_f64(pg: svbool_t, fallback: svfloat64_t, data: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clasta.nxv2f64")] + fn _svclasta_f64(pg: svbool2_t, fallback: svfloat64_t, data: svfloat64_t) -> svfloat64_t; + } + unsafe { _svclasta_f64(pg.sve_into(), fallback, data) } +} +#[doc = "Conditionally extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_s8(pg: svbool_t, fallback: svint8_t, data: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clasta.nxv16i8")] + fn _svclasta_s8(pg: svbool_t, fallback: svint8_t, data: svint8_t) -> svint8_t; + } + unsafe { _svclasta_s8(pg, fallback, data) } +} +#[doc = "Conditionally extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_s16(pg: svbool_t, fallback: svint16_t, data: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clasta.nxv8i16")] + fn _svclasta_s16(pg: svbool8_t, fallback: svint16_t, data: svint16_t) -> svint16_t; + } + unsafe { _svclasta_s16(pg.sve_into(), fallback, data) } +} +#[doc = "Conditionally extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_s32(pg: svbool_t, fallback: svint32_t, data: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clasta.nxv4i32")] + fn _svclasta_s32(pg: svbool4_t, fallback: svint32_t, data: svint32_t) -> svint32_t; + } + unsafe { _svclasta_s32(pg.sve_into(), fallback, data) } +} +#[doc = "Conditionally extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_s64(pg: svbool_t, fallback: svint64_t, data: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clasta.nxv2i64")] + fn _svclasta_s64(pg: svbool2_t, fallback: svint64_t, data: svint64_t) -> svint64_t; + } + unsafe { _svclasta_s64(pg.sve_into(), fallback, data) } +} +#[doc = "Conditionally extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_u8(pg: svbool_t, fallback: svuint8_t, data: svuint8_t) -> svuint8_t { + unsafe { svclasta_s8(pg, fallback.as_signed(), data.as_signed()).as_unsigned() } +} +#[doc = "Conditionally extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_u16(pg: svbool_t, fallback: svuint16_t, data: svuint16_t) -> svuint16_t { + unsafe { svclasta_s16(pg, fallback.as_signed(), data.as_signed()).as_unsigned() } +} +#[doc = "Conditionally extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_u32(pg: svbool_t, fallback: svuint32_t, data: svuint32_t) -> svuint32_t { + unsafe { svclasta_s32(pg, fallback.as_signed(), data.as_signed()).as_unsigned() } +} +#[doc = "Conditionally extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_u64(pg: svbool_t, fallback: svuint64_t, data: svuint64_t) -> svuint64_t { + unsafe { svclasta_s64(pg, fallback.as_signed(), data.as_signed()).as_unsigned() } +} +#[doc = "Conditionally extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_n_f32(pg: svbool_t, fallback: f32, data: svfloat32_t) -> f32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.clasta.n.nxv4f32" + )] + fn _svclasta_n_f32(pg: svbool4_t, fallback: f32, data: svfloat32_t) -> f32; + } + unsafe { _svclasta_n_f32(pg.sve_into(), fallback, data) } +} +#[doc = "Conditionally extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_n_f64(pg: svbool_t, fallback: f64, data: svfloat64_t) -> f64 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.clasta.n.nxv2f64" + )] + fn _svclasta_n_f64(pg: svbool2_t, fallback: f64, data: svfloat64_t) -> f64; + } + unsafe { _svclasta_n_f64(pg.sve_into(), fallback, data) } +} +#[doc = "Conditionally extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_n_s8(pg: svbool_t, fallback: i8, data: svint8_t) -> i8 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.clasta.n.nxv16i8" + )] + fn _svclasta_n_s8(pg: svbool_t, fallback: i8, data: svint8_t) -> i8; + } + unsafe { _svclasta_n_s8(pg, fallback, data) } +} +#[doc = "Conditionally extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_n_s16(pg: svbool_t, fallback: i16, data: svint16_t) -> i16 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.clasta.n.nxv8i16" + )] + fn _svclasta_n_s16(pg: svbool8_t, fallback: i16, data: svint16_t) -> i16; + } + unsafe { _svclasta_n_s16(pg.sve_into(), fallback, data) } +} +#[doc = "Conditionally extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_n_s32(pg: svbool_t, fallback: i32, data: svint32_t) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.clasta.n.nxv4i32" + )] + fn _svclasta_n_s32(pg: svbool4_t, fallback: i32, data: svint32_t) -> i32; + } + unsafe { _svclasta_n_s32(pg.sve_into(), fallback, data) } +} +#[doc = "Conditionally extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_n_s64(pg: svbool_t, fallback: i64, data: svint64_t) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.clasta.n.nxv2i64" + )] + fn _svclasta_n_s64(pg: svbool2_t, fallback: i64, data: svint64_t) -> i64; + } + unsafe { _svclasta_n_s64(pg.sve_into(), fallback, data) } +} +#[doc = "Conditionally extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_n_u8(pg: svbool_t, fallback: u8, data: svuint8_t) -> u8 { + unsafe { svclasta_n_s8(pg, fallback.as_signed(), data.as_signed()).as_unsigned() } +} +#[doc = "Conditionally extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_n_u16(pg: svbool_t, fallback: u16, data: svuint16_t) -> u16 { + unsafe { svclasta_n_s16(pg, fallback.as_signed(), data.as_signed()).as_unsigned() } +} +#[doc = "Conditionally extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_n_u32(pg: svbool_t, fallback: u32, data: svuint32_t) -> u32 { + unsafe { svclasta_n_s32(pg, fallback.as_signed(), data.as_signed()).as_unsigned() } +} +#[doc = "Conditionally extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clasta))] +pub fn svclasta_n_u64(pg: svbool_t, fallback: u64, data: svuint64_t) -> u64 { + unsafe { svclasta_n_s64(pg, fallback.as_signed(), data.as_signed()).as_unsigned() } +} +#[doc = "Conditionally extract last element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clastb))] +pub fn svclastb_f32(pg: svbool_t, fallback: svfloat32_t, data: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clastb.nxv4f32")] + fn _svclastb_f32(pg: svbool4_t, fallback: svfloat32_t, data: svfloat32_t) -> svfloat32_t; + } + unsafe { _svclastb_f32(pg.sve_into(), fallback, data) } +} +#[doc = "Conditionally extract last element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clastb))] +pub fn svclastb_f64(pg: svbool_t, fallback: svfloat64_t, data: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clastb.nxv2f64")] + fn _svclastb_f64(pg: svbool2_t, fallback: svfloat64_t, data: svfloat64_t) -> svfloat64_t; + } + unsafe { _svclastb_f64(pg.sve_into(), fallback, data) } +} +#[doc = "Conditionally extract last element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clastb))] +pub fn svclastb_s8(pg: svbool_t, fallback: svint8_t, data: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clastb.nxv16i8")] + fn _svclastb_s8(pg: svbool_t, fallback: svint8_t, data: svint8_t) -> svint8_t; + } + unsafe { _svclastb_s8(pg, fallback, data) } +} +#[doc = "Conditionally extract last element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clastb))] +pub fn svclastb_s16(pg: svbool_t, fallback: svint16_t, data: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clastb.nxv8i16")] + fn _svclastb_s16(pg: svbool8_t, fallback: svint16_t, data: svint16_t) -> svint16_t; + } + unsafe { _svclastb_s16(pg.sve_into(), fallback, data) } +} +#[doc = "Conditionally extract last element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clastb))] +pub fn svclastb_s32(pg: svbool_t, fallback: svint32_t, data: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clastb.nxv4i32")] + fn _svclastb_s32(pg: svbool4_t, fallback: svint32_t, data: svint32_t) -> svint32_t; + } + unsafe { _svclastb_s32(pg.sve_into(), fallback, data) } +} +#[doc = "Conditionally extract last element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clastb))] +pub fn svclastb_s64(pg: svbool_t, fallback: svint64_t, data: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clastb.nxv2i64")] + fn _svclastb_s64(pg: svbool2_t, fallback: svint64_t, data: svint64_t) -> svint64_t; + } + unsafe { _svclastb_s64(pg.sve_into(), fallback, data) } +} +#[doc = "Conditionally extract last element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clastb))] +pub fn svclastb_u8(pg: svbool_t, fallback: svuint8_t, data: svuint8_t) -> svuint8_t { + unsafe { svclastb_s8(pg, fallback.as_signed(), data.as_signed()).as_unsigned() } +} +#[doc = "Conditionally extract last element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clastb))] +pub fn svclastb_u16(pg: svbool_t, fallback: svuint16_t, data: svuint16_t) -> svuint16_t { + unsafe { svclastb_s16(pg, fallback.as_signed(), data.as_signed()).as_unsigned() } +} +#[doc = "Conditionally extract last element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clastb))] +pub fn svclastb_u32(pg: svbool_t, fallback: svuint32_t, data: svuint32_t) -> svuint32_t { + unsafe { svclastb_s32(pg, fallback.as_signed(), data.as_signed()).as_unsigned() } +} +#[doc = "Conditionally extract last element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clastb))] +pub fn svclastb_u64(pg: svbool_t, fallback: svuint64_t, data: svuint64_t) -> svuint64_t { + unsafe { svclastb_s64(pg, fallback.as_signed(), data.as_signed()).as_unsigned() } +} +#[doc = "Conditionally extract last element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clastb))] +pub fn svclastb_n_f32(pg: svbool_t, fallback: f32, data: svfloat32_t) -> f32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.clastb.n.nxv4f32" + )] + fn _svclastb_n_f32(pg: svbool4_t, fallback: f32, data: svfloat32_t) -> f32; + } + unsafe { _svclastb_n_f32(pg.sve_into(), fallback, data) } +} +#[doc = "Conditionally extract last element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clastb))] +pub fn svclastb_n_f64(pg: svbool_t, fallback: f64, data: svfloat64_t) -> f64 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.clastb.n.nxv2f64" + )] + fn _svclastb_n_f64(pg: svbool2_t, fallback: f64, data: svfloat64_t) -> f64; + } + unsafe { _svclastb_n_f64(pg.sve_into(), fallback, data) } +} +#[doc = "Conditionally extract last element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clastb))] +pub fn svclastb_n_s8(pg: svbool_t, fallback: i8, data: svint8_t) -> i8 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.clastb.n.nxv16i8" + )] + fn _svclastb_n_s8(pg: svbool_t, fallback: i8, data: svint8_t) -> i8; + } + unsafe { _svclastb_n_s8(pg, fallback, data) } +} +#[doc = "Conditionally extract last element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clastb))] +pub fn svclastb_n_s16(pg: svbool_t, fallback: i16, data: svint16_t) -> i16 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.clastb.n.nxv8i16" + )] + fn _svclastb_n_s16(pg: svbool8_t, fallback: i16, data: svint16_t) -> i16; + } + unsafe { _svclastb_n_s16(pg.sve_into(), fallback, data) } +} +#[doc = "Conditionally extract last element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clastb))] +pub fn svclastb_n_s32(pg: svbool_t, fallback: i32, data: svint32_t) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.clastb.n.nxv4i32" + )] + fn _svclastb_n_s32(pg: svbool4_t, fallback: i32, data: svint32_t) -> i32; + } + unsafe { _svclastb_n_s32(pg.sve_into(), fallback, data) } +} +#[doc = "Conditionally extract last element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clastb))] +pub fn svclastb_n_s64(pg: svbool_t, fallback: i64, data: svint64_t) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.clastb.n.nxv2i64" + )] + fn _svclastb_n_s64(pg: svbool2_t, fallback: i64, data: svint64_t) -> i64; + } + unsafe { _svclastb_n_s64(pg.sve_into(), fallback, data) } +} +#[doc = "Conditionally extract last element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clastb))] +pub fn svclastb_n_u8(pg: svbool_t, fallback: u8, data: svuint8_t) -> u8 { + unsafe { svclastb_n_s8(pg, fallback.as_signed(), data.as_signed()).as_unsigned() } +} +#[doc = "Conditionally extract last element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clastb))] +pub fn svclastb_n_u16(pg: svbool_t, fallback: u16, data: svuint16_t) -> u16 { + unsafe { svclastb_n_s16(pg, fallback.as_signed(), data.as_signed()).as_unsigned() } +} +#[doc = "Conditionally extract last element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clastb))] +pub fn svclastb_n_u32(pg: svbool_t, fallback: u32, data: svuint32_t) -> u32 { + unsafe { svclastb_n_s32(pg, fallback.as_signed(), data.as_signed()).as_unsigned() } +} +#[doc = "Conditionally extract last element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clastb))] +pub fn svclastb_n_u64(pg: svbool_t, fallback: u64, data: svuint64_t) -> u64 { + unsafe { svclastb_n_s64(pg, fallback.as_signed(), data.as_signed()).as_unsigned() } +} +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cls))] +pub fn svcls_s8_m(inactive: svuint8_t, pg: svbool_t, op: svint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cls.nxv16i8")] + fn _svcls_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t; + } + unsafe { _svcls_s8_m(inactive.as_signed(), pg, op).as_unsigned() } +} +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cls))] +pub fn svcls_s8_x(pg: svbool_t, op: svint8_t) -> svuint8_t { + unsafe { svcls_s8_m(op.as_unsigned(), pg, op) } +} +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cls))] +pub fn svcls_s8_z(pg: svbool_t, op: svint8_t) -> svuint8_t { + svcls_s8_m(svdup_n_u8(0), pg, op) +} +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cls))] +pub fn svcls_s16_m(inactive: svuint16_t, pg: svbool_t, op: svint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cls.nxv8i16")] + fn _svcls_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svcls_s16_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() } +} +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cls))] +pub fn svcls_s16_x(pg: svbool_t, op: svint16_t) -> svuint16_t { + unsafe { svcls_s16_m(op.as_unsigned(), pg, op) } +} +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cls))] +pub fn svcls_s16_z(pg: svbool_t, op: svint16_t) -> svuint16_t { + svcls_s16_m(svdup_n_u16(0), pg, op) +} +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cls))] +pub fn svcls_s32_m(inactive: svuint32_t, pg: svbool_t, op: svint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cls.nxv4i32")] + fn _svcls_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svcls_s32_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() } +} +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cls))] +pub fn svcls_s32_x(pg: svbool_t, op: svint32_t) -> svuint32_t { + unsafe { svcls_s32_m(op.as_unsigned(), pg, op) } +} +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cls))] +pub fn svcls_s32_z(pg: svbool_t, op: svint32_t) -> svuint32_t { + svcls_s32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cls))] +pub fn svcls_s64_m(inactive: svuint64_t, pg: svbool_t, op: svint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cls.nxv2i64")] + fn _svcls_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svcls_s64_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() } +} +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cls))] +pub fn svcls_s64_x(pg: svbool_t, op: svint64_t) -> svuint64_t { + unsafe { svcls_s64_m(op.as_unsigned(), pg, op) } +} +#[doc = "Count leading sign bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cls))] +pub fn svcls_s64_z(pg: svbool_t, op: svint64_t) -> svuint64_t { + svcls_s64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s8_m(inactive: svuint8_t, pg: svbool_t, op: svint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clz.nxv16i8")] + fn _svclz_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t; + } + unsafe { _svclz_s8_m(inactive.as_signed(), pg, op).as_unsigned() } +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s8_x(pg: svbool_t, op: svint8_t) -> svuint8_t { + unsafe { svclz_s8_m(op.as_unsigned(), pg, op) } +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s8_z(pg: svbool_t, op: svint8_t) -> svuint8_t { + svclz_s8_m(svdup_n_u8(0), pg, op) +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s16_m(inactive: svuint16_t, pg: svbool_t, op: svint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clz.nxv8i16")] + fn _svclz_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svclz_s16_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() } +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s16_x(pg: svbool_t, op: svint16_t) -> svuint16_t { + unsafe { svclz_s16_m(op.as_unsigned(), pg, op) } +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s16_z(pg: svbool_t, op: svint16_t) -> svuint16_t { + svclz_s16_m(svdup_n_u16(0), pg, op) +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s32_m(inactive: svuint32_t, pg: svbool_t, op: svint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clz.nxv4i32")] + fn _svclz_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svclz_s32_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() } +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s32_x(pg: svbool_t, op: svint32_t) -> svuint32_t { + unsafe { svclz_s32_m(op.as_unsigned(), pg, op) } +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s32_z(pg: svbool_t, op: svint32_t) -> svuint32_t { + svclz_s32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s64_m(inactive: svuint64_t, pg: svbool_t, op: svint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clz.nxv2i64")] + fn _svclz_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svclz_s64_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() } +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s64_x(pg: svbool_t, op: svint64_t) -> svuint64_t { + unsafe { svclz_s64_m(op.as_unsigned(), pg, op) } +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_s64_z(pg: svbool_t, op: svint64_t) -> svuint64_t { + svclz_s64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u8_m(inactive: svuint8_t, pg: svbool_t, op: svuint8_t) -> svuint8_t { + unsafe { svclz_s8_m(inactive, pg, op.as_signed()) } +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u8_x(pg: svbool_t, op: svuint8_t) -> svuint8_t { + svclz_u8_m(op, pg, op) +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u8_z(pg: svbool_t, op: svuint8_t) -> svuint8_t { + svclz_u8_m(svdup_n_u8(0), pg, op) +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint16_t { + unsafe { svclz_s16_m(inactive, pg, op.as_signed()) } +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svclz_u16_m(op, pg, op) +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svclz_u16_m(svdup_n_u16(0), pg, op) +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe { svclz_s32_m(inactive, pg, op.as_signed()) } +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svclz_u32_m(op, pg, op) +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svclz_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe { svclz_s64_m(inactive, pg, op.as_signed()) } +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svclz_u64_m(op, pg, op) +} +#[doc = "Count leading zero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(clz))] +pub fn svclz_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svclz_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Complex multiply-add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))] +pub fn svcmla_f32_m( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmla.nxv4f32")] + fn _svcmla_f32_m( + pg: svbool4_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, + imm_rotation: i32, + ) -> svfloat32_t; + } + unsafe { _svcmla_f32_m(pg.sve_into(), op1, op2, op3, IMM_ROTATION) } +} +#[doc = "Complex multiply-add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))] +pub fn svcmla_f32_x( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svcmla_f32_m::(pg, op1, op2, op3) +} +#[doc = "Complex multiply-add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))] +pub fn svcmla_f32_z( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svcmla_f32_m::(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3) +} +#[doc = "Complex multiply-add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))] +pub fn svcmla_f64_m( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmla.nxv2f64")] + fn _svcmla_f64_m( + pg: svbool2_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, + imm_rotation: i32, + ) -> svfloat64_t; + } + unsafe { _svcmla_f64_m(pg.sve_into(), op1, op2, op3, IMM_ROTATION) } +} +#[doc = "Complex multiply-add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))] +pub fn svcmla_f64_x( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svcmla_f64_m::(pg, op1, op2, op3) +} +#[doc = "Complex multiply-add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))] +pub fn svcmla_f64_z( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svcmla_f64_m::(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3) +} +#[doc = "Complex multiply-add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla_lane[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmla, IMM_INDEX = 0, IMM_ROTATION = 90))] +pub fn svcmla_lane_f32( + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + static_assert_range!(IMM_INDEX, 0..=1); + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fcmla.lane.x.nxv4f32" + )] + fn _svcmla_lane_f32( + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, + imm_index: i32, + imm_rotation: i32, + ) -> svfloat32_t; + } + unsafe { _svcmla_lane_f32(op1, op2, op3, IMM_INDEX, IMM_ROTATION) } +} +#[doc = "Compare equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmeq))] +pub fn svcmpeq_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpeq.nxv4f32")] + fn _svcmpeq_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t; + } + unsafe { _svcmpeq_f32(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmeq))] +pub fn svcmpeq_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svcmpeq_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Compare equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmeq))] +pub fn svcmpeq_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpeq.nxv2f64")] + fn _svcmpeq_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t; + } + unsafe { _svcmpeq_f64(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmeq))] +pub fn svcmpeq_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svcmpeq_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Compare equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpeq.nxv16i8")] + fn _svcmpeq_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t; + } + unsafe { _svcmpeq_s8(pg, op1, op2) } +} +#[doc = "Compare equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { + svcmpeq_s8(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Compare equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpeq.nxv8i16")] + fn _svcmpeq_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t; + } + unsafe { _svcmpeq_s16(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { + svcmpeq_s16(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Compare equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpeq.nxv4i32")] + fn _svcmpeq_s32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t; + } + unsafe { _svcmpeq_s32(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { + svcmpeq_s32(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Compare equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpeq.nxv2i64")] + fn _svcmpeq_s64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t; + } + unsafe { _svcmpeq_s64(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { + svcmpeq_s64(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { + unsafe { svcmpeq_s8(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { + svcmpeq_u8(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Compare equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { + unsafe { svcmpeq_s16(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { + svcmpeq_u16(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Compare equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { + unsafe { svcmpeq_s32(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { + svcmpeq_u32(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Compare equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { + unsafe { svcmpeq_s64(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { + svcmpeq_u64(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq_wide[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpeq.wide.nxv16i8" + )] + fn _svcmpeq_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t; + } + unsafe { _svcmpeq_wide_s8(pg, op1, op2) } +} +#[doc = "Compare equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq_wide[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_wide_n_s8(pg: svbool_t, op1: svint8_t, op2: i64) -> svbool_t { + svcmpeq_wide_s8(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq_wide[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_wide_s16(pg: svbool_t, op1: svint16_t, op2: svint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpeq.wide.nxv8i16" + )] + fn _svcmpeq_wide_s16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t; + } + unsafe { _svcmpeq_wide_s16(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq_wide[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_wide_n_s16(pg: svbool_t, op1: svint16_t, op2: i64) -> svbool_t { + svcmpeq_wide_s16(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq_wide[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_wide_s32(pg: svbool_t, op1: svint32_t, op2: svint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpeq.wide.nxv4i32" + )] + fn _svcmpeq_wide_s32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t; + } + unsafe { _svcmpeq_wide_s32(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq_wide[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpeq))] +pub fn svcmpeq_wide_n_s32(pg: svbool_t, op1: svint32_t, op2: i64) -> svbool_t { + svcmpeq_wide_s32(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmge))] +pub fn svcmpge_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpge.nxv4f32")] + fn _svcmpge_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t; + } + unsafe { _svcmpge_f32(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmge))] +pub fn svcmpge_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svcmpge_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmge))] +pub fn svcmpge_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpge.nxv2f64")] + fn _svcmpge_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t; + } + unsafe { _svcmpge_f64(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmge))] +pub fn svcmpge_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svcmpge_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpge.nxv16i8")] + fn _svcmpge_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t; + } + unsafe { _svcmpge_s8(pg, op1, op2) } +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { + svcmpge_s8(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpge.nxv8i16")] + fn _svcmpge_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t; + } + unsafe { _svcmpge_s16(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { + svcmpge_s16(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpge.nxv4i32")] + fn _svcmpge_s32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t; + } + unsafe { _svcmpge_s32(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { + svcmpge_s32(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpge.nxv2i64")] + fn _svcmpge_s64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t; + } + unsafe { _svcmpge_s64(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { + svcmpge_s64(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmphs.nxv16i8")] + fn _svcmpge_u8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t; + } + unsafe { _svcmpge_u8(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { + svcmpge_u8(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmphs.nxv8i16")] + fn _svcmpge_u16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t; + } + unsafe { _svcmpge_u16(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { + svcmpge_u16(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmphs.nxv4i32")] + fn _svcmpge_u32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t; + } + unsafe { _svcmpge_u32(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { + svcmpge_u32(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmphs.nxv2i64")] + fn _svcmpge_u64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t; + } + unsafe { _svcmpge_u64(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { + svcmpge_u64(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpge.wide.nxv16i8" + )] + fn _svcmpge_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t; + } + unsafe { _svcmpge_wide_s8(pg, op1, op2) } +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_wide_n_s8(pg: svbool_t, op1: svint8_t, op2: i64) -> svbool_t { + svcmpge_wide_s8(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_wide_s16(pg: svbool_t, op1: svint16_t, op2: svint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpge.wide.nxv8i16" + )] + fn _svcmpge_wide_s16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t; + } + unsafe { _svcmpge_wide_s16(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_wide_n_s16(pg: svbool_t, op1: svint16_t, op2: i64) -> svbool_t { + svcmpge_wide_s16(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_wide_s32(pg: svbool_t, op1: svint32_t, op2: svint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpge.wide.nxv4i32" + )] + fn _svcmpge_wide_s32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t; + } + unsafe { _svcmpge_wide_s32(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmpge_wide_n_s32(pg: svbool_t, op1: svint32_t, op2: i64) -> svbool_t { + svcmpge_wide_s32(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_wide_u8(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmphs.wide.nxv16i8" + )] + fn _svcmpge_wide_u8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t; + } + unsafe { _svcmpge_wide_u8(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_wide_n_u8(pg: svbool_t, op1: svuint8_t, op2: u64) -> svbool_t { + svcmpge_wide_u8(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_wide_u16(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmphs.wide.nxv8i16" + )] + fn _svcmpge_wide_u16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t; + } + unsafe { _svcmpge_wide_u16(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_wide_n_u16(pg: svbool_t, op1: svuint16_t, op2: u64) -> svbool_t { + svcmpge_wide_u16(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_wide_u32(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmphs.wide.nxv4i32" + )] + fn _svcmpge_wide_u32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t; + } + unsafe { _svcmpge_wide_u32(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "Compare greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmpge_wide_n_u32(pg: svbool_t, op1: svuint32_t, op2: u64) -> svbool_t { + svcmpge_wide_u32(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmgt))] +pub fn svcmpgt_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpgt.nxv4f32")] + fn _svcmpgt_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t; + } + unsafe { _svcmpgt_f32(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmgt))] +pub fn svcmpgt_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svcmpgt_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmgt))] +pub fn svcmpgt_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpgt.nxv2f64")] + fn _svcmpgt_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t; + } + unsafe { _svcmpgt_f64(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmgt))] +pub fn svcmpgt_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svcmpgt_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpgt.nxv16i8")] + fn _svcmpgt_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t; + } + unsafe { _svcmpgt_s8(pg, op1, op2) } +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { + svcmpgt_s8(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpgt.nxv8i16")] + fn _svcmpgt_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t; + } + unsafe { _svcmpgt_s16(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { + svcmpgt_s16(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpgt.nxv4i32")] + fn _svcmpgt_s32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t; + } + unsafe { _svcmpgt_s32(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { + svcmpgt_s32(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpgt.nxv2i64")] + fn _svcmpgt_s64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t; + } + unsafe { _svcmpgt_s64(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { + svcmpgt_s64(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmphi.nxv16i8")] + fn _svcmpgt_u8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t; + } + unsafe { _svcmpgt_u8(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { + svcmpgt_u8(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmphi.nxv8i16")] + fn _svcmpgt_u16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t; + } + unsafe { _svcmpgt_u16(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { + svcmpgt_u16(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmphi.nxv4i32")] + fn _svcmpgt_u32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t; + } + unsafe { _svcmpgt_u32(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { + svcmpgt_u32(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmphi.nxv2i64")] + fn _svcmpgt_u64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t; + } + unsafe { _svcmpgt_u64(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { + svcmpgt_u64(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpgt.wide.nxv16i8" + )] + fn _svcmpgt_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t; + } + unsafe { _svcmpgt_wide_s8(pg, op1, op2) } +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_wide_n_s8(pg: svbool_t, op1: svint8_t, op2: i64) -> svbool_t { + svcmpgt_wide_s8(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_wide_s16(pg: svbool_t, op1: svint16_t, op2: svint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpgt.wide.nxv8i16" + )] + fn _svcmpgt_wide_s16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t; + } + unsafe { _svcmpgt_wide_s16(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_wide_n_s16(pg: svbool_t, op1: svint16_t, op2: i64) -> svbool_t { + svcmpgt_wide_s16(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_wide_s32(pg: svbool_t, op1: svint32_t, op2: svint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpgt.wide.nxv4i32" + )] + fn _svcmpgt_wide_s32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t; + } + unsafe { _svcmpgt_wide_s32(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmpgt_wide_n_s32(pg: svbool_t, op1: svint32_t, op2: i64) -> svbool_t { + svcmpgt_wide_s32(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_wide_u8(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmphi.wide.nxv16i8" + )] + fn _svcmpgt_wide_u8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t; + } + unsafe { _svcmpgt_wide_u8(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_wide_n_u8(pg: svbool_t, op1: svuint8_t, op2: u64) -> svbool_t { + svcmpgt_wide_u8(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_wide_u16(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmphi.wide.nxv8i16" + )] + fn _svcmpgt_wide_u16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t; + } + unsafe { _svcmpgt_wide_u16(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_wide_n_u16(pg: svbool_t, op1: svuint16_t, op2: u64) -> svbool_t { + svcmpgt_wide_u16(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_wide_u32(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmphi.wide.nxv4i32" + )] + fn _svcmpgt_wide_u32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t; + } + unsafe { _svcmpgt_wide_u32(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "Compare greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmpgt_wide_n_u32(pg: svbool_t, op1: svuint32_t, op2: u64) -> svbool_t { + svcmpgt_wide_u32(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmge))] +pub fn svcmple_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + svcmpge_f32(pg, op2, op1) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmge))] +pub fn svcmple_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svcmple_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmge))] +pub fn svcmple_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + svcmpge_f64(pg, op2, op1) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmge))] +pub fn svcmple_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svcmple_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmple_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { + svcmpge_s8(pg, op2, op1) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmple_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { + svcmple_s8(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmple_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { + svcmpge_s16(pg, op2, op1) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmple_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { + svcmple_s16(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmple_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { + svcmpge_s32(pg, op2, op1) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmple_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { + svcmple_s32(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmple_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { + svcmpge_s64(pg, op2, op1) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpge))] +pub fn svcmple_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { + svcmple_s64(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmple_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { + svcmpge_u8(pg, op2, op1) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmple_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { + svcmple_u8(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmple_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { + svcmpge_u16(pg, op2, op1) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmple_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { + svcmple_u16(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmple_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { + svcmpge_u32(pg, op2, op1) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmple_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { + svcmple_u32(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmple_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { + svcmpge_u64(pg, op2, op1) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphs))] +pub fn svcmple_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { + svcmple_u64(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmple))] +pub fn svcmple_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmple.wide.nxv16i8" + )] + fn _svcmple_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t; + } + unsafe { _svcmple_wide_s8(pg, op1, op2) } +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmple))] +pub fn svcmple_wide_n_s8(pg: svbool_t, op1: svint8_t, op2: i64) -> svbool_t { + svcmple_wide_s8(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmple))] +pub fn svcmple_wide_s16(pg: svbool_t, op1: svint16_t, op2: svint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmple.wide.nxv8i16" + )] + fn _svcmple_wide_s16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t; + } + unsafe { _svcmple_wide_s16(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmple))] +pub fn svcmple_wide_n_s16(pg: svbool_t, op1: svint16_t, op2: i64) -> svbool_t { + svcmple_wide_s16(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmple))] +pub fn svcmple_wide_s32(pg: svbool_t, op1: svint32_t, op2: svint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmple.wide.nxv4i32" + )] + fn _svcmple_wide_s32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t; + } + unsafe { _svcmple_wide_s32(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmple))] +pub fn svcmple_wide_n_s32(pg: svbool_t, op1: svint32_t, op2: i64) -> svbool_t { + svcmple_wide_s32(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpls))] +pub fn svcmple_wide_u8(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpls.wide.nxv16i8" + )] + fn _svcmple_wide_u8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t; + } + unsafe { _svcmple_wide_u8(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpls))] +pub fn svcmple_wide_n_u8(pg: svbool_t, op1: svuint8_t, op2: u64) -> svbool_t { + svcmple_wide_u8(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpls))] +pub fn svcmple_wide_u16(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpls.wide.nxv8i16" + )] + fn _svcmple_wide_u16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t; + } + unsafe { _svcmple_wide_u16(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpls))] +pub fn svcmple_wide_n_u16(pg: svbool_t, op1: svuint16_t, op2: u64) -> svbool_t { + svcmple_wide_u16(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpls))] +pub fn svcmple_wide_u32(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpls.wide.nxv4i32" + )] + fn _svcmple_wide_u32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t; + } + unsafe { _svcmple_wide_u32(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "Compare less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpls))] +pub fn svcmple_wide_n_u32(pg: svbool_t, op1: svuint32_t, op2: u64) -> svbool_t { + svcmple_wide_u32(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmgt))] +pub fn svcmplt_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + svcmpgt_f32(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmgt))] +pub fn svcmplt_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svcmplt_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmgt))] +pub fn svcmplt_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + svcmpgt_f64(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmgt))] +pub fn svcmplt_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svcmplt_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { + svcmpgt_s8(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { + svcmplt_s8(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { + svcmpgt_s16(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { + svcmplt_s16(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { + svcmpgt_s32(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { + svcmplt_s32(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { + svcmpgt_s64(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpgt))] +pub fn svcmplt_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { + svcmplt_s64(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { + svcmpgt_u8(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { + svcmplt_u8(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { + svcmpgt_u16(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { + svcmplt_u16(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { + svcmpgt_u32(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { + svcmplt_u32(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { + svcmpgt_u64(pg, op2, op1) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmphi))] +pub fn svcmplt_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { + svcmplt_u64(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmplt))] +pub fn svcmplt_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmplt.wide.nxv16i8" + )] + fn _svcmplt_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t; + } + unsafe { _svcmplt_wide_s8(pg, op1, op2) } +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmplt))] +pub fn svcmplt_wide_n_s8(pg: svbool_t, op1: svint8_t, op2: i64) -> svbool_t { + svcmplt_wide_s8(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmplt))] +pub fn svcmplt_wide_s16(pg: svbool_t, op1: svint16_t, op2: svint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmplt.wide.nxv8i16" + )] + fn _svcmplt_wide_s16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t; + } + unsafe { _svcmplt_wide_s16(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmplt))] +pub fn svcmplt_wide_n_s16(pg: svbool_t, op1: svint16_t, op2: i64) -> svbool_t { + svcmplt_wide_s16(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmplt))] +pub fn svcmplt_wide_s32(pg: svbool_t, op1: svint32_t, op2: svint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmplt.wide.nxv4i32" + )] + fn _svcmplt_wide_s32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t; + } + unsafe { _svcmplt_wide_s32(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmplt))] +pub fn svcmplt_wide_n_s32(pg: svbool_t, op1: svint32_t, op2: i64) -> svbool_t { + svcmplt_wide_s32(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmplo))] +pub fn svcmplt_wide_u8(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmplo.wide.nxv16i8" + )] + fn _svcmplt_wide_u8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t; + } + unsafe { _svcmplt_wide_u8(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmplo))] +pub fn svcmplt_wide_n_u8(pg: svbool_t, op1: svuint8_t, op2: u64) -> svbool_t { + svcmplt_wide_u8(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmplo))] +pub fn svcmplt_wide_u16(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmplo.wide.nxv8i16" + )] + fn _svcmplt_wide_u16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t; + } + unsafe { _svcmplt_wide_u16(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmplo))] +pub fn svcmplt_wide_n_u16(pg: svbool_t, op1: svuint16_t, op2: u64) -> svbool_t { + svcmplt_wide_u16(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmplo))] +pub fn svcmplt_wide_u32(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmplo.wide.nxv4i32" + )] + fn _svcmplt_wide_u32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t; + } + unsafe { _svcmplt_wide_u32(pg.sve_into(), op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "Compare less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmplo))] +pub fn svcmplt_wide_n_u32(pg: svbool_t, op1: svuint32_t, op2: u64) -> svbool_t { + svcmplt_wide_u32(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmne))] +pub fn svcmpne_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpne.nxv4f32")] + fn _svcmpne_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t; + } + unsafe { _svcmpne_f32(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmne))] +pub fn svcmpne_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svcmpne_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmne))] +pub fn svcmpne_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpne.nxv2f64")] + fn _svcmpne_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t; + } + unsafe { _svcmpne_f64(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmne))] +pub fn svcmpne_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svcmpne_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpne.nxv16i8")] + fn _svcmpne_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t; + } + unsafe { _svcmpne_s8(pg, op1, op2) } +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { + svcmpne_s8(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpne.nxv8i16")] + fn _svcmpne_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t; + } + unsafe { _svcmpne_s16(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { + svcmpne_s16(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpne.nxv4i32")] + fn _svcmpne_s32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t; + } + unsafe { _svcmpne_s32(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { + svcmpne_s32(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpne.nxv2i64")] + fn _svcmpne_s64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t; + } + unsafe { _svcmpne_s64(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { + svcmpne_s64(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { + unsafe { svcmpne_s8(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { + svcmpne_u8(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { + unsafe { svcmpne_s16(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { + svcmpne_u16(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { + unsafe { svcmpne_s32(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { + svcmpne_u32(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { + unsafe { svcmpne_s64(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { + svcmpne_u64(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne_wide[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpne.wide.nxv16i8" + )] + fn _svcmpne_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t; + } + unsafe { _svcmpne_wide_s8(pg, op1, op2) } +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne_wide[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_wide_n_s8(pg: svbool_t, op1: svint8_t, op2: i64) -> svbool_t { + svcmpne_wide_s8(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne_wide[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_wide_s16(pg: svbool_t, op1: svint16_t, op2: svint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpne.wide.nxv8i16" + )] + fn _svcmpne_wide_s16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t; + } + unsafe { _svcmpne_wide_s16(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne_wide[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_wide_n_s16(pg: svbool_t, op1: svint16_t, op2: i64) -> svbool_t { + svcmpne_wide_s16(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne_wide[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_wide_s32(pg: svbool_t, op1: svint32_t, op2: svint64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmpne.wide.nxv4i32" + )] + fn _svcmpne_wide_s32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t; + } + unsafe { _svcmpne_wide_s32(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare not equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne_wide[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmpne))] +pub fn svcmpne_wide_n_s32(pg: svbool_t, op1: svint32_t, op2: i64) -> svbool_t { + svcmpne_wide_s32(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Compare unordered with"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpuo[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmuo))] +pub fn svcmpuo_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpuo.nxv4f32")] + fn _svcmpuo_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t; + } + unsafe { _svcmpuo_f32(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare unordered with"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpuo[_n_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmuo))] +pub fn svcmpuo_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { + svcmpuo_f32(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Compare unordered with"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpuo[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmuo))] +pub fn svcmpuo_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpuo.nxv2f64")] + fn _svcmpuo_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t; + } + unsafe { _svcmpuo_f64(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Compare unordered with"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpuo[_n_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcmuo))] +pub fn svcmpuo_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { + svcmpuo_f64(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnot.nxv16i8")] + fn _svcnot_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t; + } + unsafe { _svcnot_s8_m(inactive, pg, op) } +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t { + svcnot_s8_m(op, pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t { + svcnot_s8_m(svdup_n_s8(0), pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnot.nxv8i16")] + fn _svcnot_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svcnot_s16_m(inactive, pg.sve_into(), op) } +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t { + svcnot_s16_m(op, pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t { + svcnot_s16_m(svdup_n_s16(0), pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnot.nxv4i32")] + fn _svcnot_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svcnot_s32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { + svcnot_s32_m(op, pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { + svcnot_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnot.nxv2i64")] + fn _svcnot_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svcnot_s64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svcnot_s64_m(op, pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svcnot_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u8_m(inactive: svuint8_t, pg: svbool_t, op: svuint8_t) -> svuint8_t { + unsafe { svcnot_s8_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u8_x(pg: svbool_t, op: svuint8_t) -> svuint8_t { + svcnot_u8_m(op, pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u8_z(pg: svbool_t, op: svuint8_t) -> svuint8_t { + svcnot_u8_m(svdup_n_u8(0), pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint16_t { + unsafe { svcnot_s16_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svcnot_u16_m(op, pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svcnot_u16_m(svdup_n_u16(0), pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe { svcnot_s32_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svcnot_u32_m(op, pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svcnot_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe { svcnot_s64_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svcnot_u64_m(op, pg, op) +} +#[doc = "Logically invert boolean condition"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnot))] +pub fn svcnot_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svcnot_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_f32_m(inactive: svuint32_t, pg: svbool_t, op: svfloat32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnt.nxv4f32")] + fn _svcnt_f32_m(inactive: svint32_t, pg: svbool4_t, op: svfloat32_t) -> svint32_t; + } + unsafe { _svcnt_f32_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() } +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_f32_x(pg: svbool_t, op: svfloat32_t) -> svuint32_t { + unsafe { svcnt_f32_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_f32_z(pg: svbool_t, op: svfloat32_t) -> svuint32_t { + svcnt_f32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_f64_m(inactive: svuint64_t, pg: svbool_t, op: svfloat64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnt.nxv2f64")] + fn _svcnt_f64_m(inactive: svint64_t, pg: svbool2_t, op: svfloat64_t) -> svint64_t; + } + unsafe { _svcnt_f64_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() } +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_f64_x(pg: svbool_t, op: svfloat64_t) -> svuint64_t { + unsafe { svcnt_f64_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_f64_z(pg: svbool_t, op: svfloat64_t) -> svuint64_t { + svcnt_f64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s8_m(inactive: svuint8_t, pg: svbool_t, op: svint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnt.nxv16i8")] + fn _svcnt_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t; + } + unsafe { _svcnt_s8_m(inactive.as_signed(), pg, op).as_unsigned() } +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s8_x(pg: svbool_t, op: svint8_t) -> svuint8_t { + unsafe { svcnt_s8_m(op.as_unsigned(), pg, op) } +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s8_z(pg: svbool_t, op: svint8_t) -> svuint8_t { + svcnt_s8_m(svdup_n_u8(0), pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s16_m(inactive: svuint16_t, pg: svbool_t, op: svint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnt.nxv8i16")] + fn _svcnt_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svcnt_s16_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() } +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s16_x(pg: svbool_t, op: svint16_t) -> svuint16_t { + unsafe { svcnt_s16_m(op.as_unsigned(), pg, op) } +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s16_z(pg: svbool_t, op: svint16_t) -> svuint16_t { + svcnt_s16_m(svdup_n_u16(0), pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s32_m(inactive: svuint32_t, pg: svbool_t, op: svint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnt.nxv4i32")] + fn _svcnt_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svcnt_s32_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() } +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s32_x(pg: svbool_t, op: svint32_t) -> svuint32_t { + unsafe { svcnt_s32_m(op.as_unsigned(), pg, op) } +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s32_z(pg: svbool_t, op: svint32_t) -> svuint32_t { + svcnt_s32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s64_m(inactive: svuint64_t, pg: svbool_t, op: svint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnt.nxv2i64")] + fn _svcnt_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svcnt_s64_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() } +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s64_x(pg: svbool_t, op: svint64_t) -> svuint64_t { + unsafe { svcnt_s64_m(op.as_unsigned(), pg, op) } +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_s64_z(pg: svbool_t, op: svint64_t) -> svuint64_t { + svcnt_s64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u8_m(inactive: svuint8_t, pg: svbool_t, op: svuint8_t) -> svuint8_t { + unsafe { svcnt_s8_m(inactive, pg, op.as_signed()) } +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u8_x(pg: svbool_t, op: svuint8_t) -> svuint8_t { + svcnt_u8_m(op, pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u8_z(pg: svbool_t, op: svuint8_t) -> svuint8_t { + svcnt_u8_m(svdup_n_u8(0), pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint16_t { + unsafe { svcnt_s16_m(inactive, pg, op.as_signed()) } +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svcnt_u16_m(op, pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svcnt_u16_m(svdup_n_u16(0), pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe { svcnt_s32_m(inactive, pg, op.as_signed()) } +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svcnt_u32_m(op, pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svcnt_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe { svcnt_s64_m(inactive, pg, op.as_signed()) } +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svcnt_u64_m(op, pg, op) +} +#[doc = "Count nonzero bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnt))] +pub fn svcnt_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svcnt_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Count the number of 8-bit elements in a vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntb)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rdvl))] +pub fn svcntb() -> u64 { + svcntb_pat::<{ svpattern::SV_ALL }>() +} +#[doc = "Count the number of 16-bit elements in a vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnth)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnth))] +pub fn svcnth() -> u64 { + svcnth_pat::<{ svpattern::SV_ALL }>() +} +#[doc = "Count the number of 32-bit elements in a vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntw)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cntw))] +pub fn svcntw() -> u64 { + svcntw_pat::<{ svpattern::SV_ALL }>() +} +#[doc = "Count the number of 64-bit elements in a vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntd)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cntd))] +pub fn svcntd() -> u64 { + svcntd_pat::<{ svpattern::SV_ALL }>() +} +#[doc = "Count the number of 8-bit elements in a vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntb_pat)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (rdvl , PATTERN = { svpattern :: SV_ALL }))] +# [cfg_attr (test , assert_instr (cntb , PATTERN = { svpattern :: SV_MUL4 }))] +pub fn svcntb_pat() -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntb")] + fn _svcntb_pat(pattern: svpattern) -> i64; + } + unsafe { _svcntb_pat(PATTERN).as_unsigned() } +} +#[doc = "Count the number of 16-bit elements in a vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnth_pat)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (cnth , PATTERN = { svpattern :: SV_ALL }))] +pub fn svcnth_pat() -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnth")] + fn _svcnth_pat(pattern: svpattern) -> i64; + } + unsafe { _svcnth_pat(PATTERN).as_unsigned() } +} +#[doc = "Count the number of 32-bit elements in a vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntw_pat)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (cntw , PATTERN = { svpattern :: SV_ALL }))] +pub fn svcntw_pat() -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntw")] + fn _svcntw_pat(pattern: svpattern) -> i64; + } + unsafe { _svcntw_pat(PATTERN).as_unsigned() } +} +#[doc = "Count the number of 64-bit elements in a vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntd_pat)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (cntd , PATTERN = { svpattern :: SV_ALL }))] +pub fn svcntd_pat() -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntd")] + fn _svcntd_pat(pattern: svpattern) -> i64; + } + unsafe { _svcntd_pat(PATTERN).as_unsigned() } +} +#[doc = "Count set predicate bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntp_b8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cntp))] +pub fn svcntp_b8(pg: svbool_t, op: svbool_t) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntp.nxv16i1")] + fn _svcntp_b8(pg: svbool_t, op: svbool_t) -> i64; + } + unsafe { _svcntp_b8(pg, op).as_unsigned() } +} +#[doc = "Count set predicate bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntp_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cntp))] +pub fn svcntp_b16(pg: svbool_t, op: svbool_t) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntp.nxv8i1")] + fn _svcntp_b16(pg: svbool8_t, op: svbool8_t) -> i64; + } + unsafe { _svcntp_b16(pg.sve_into(), op.sve_into()).as_unsigned() } +} +#[doc = "Count set predicate bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntp_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cntp))] +pub fn svcntp_b32(pg: svbool_t, op: svbool_t) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntp.nxv4i1")] + fn _svcntp_b32(pg: svbool4_t, op: svbool4_t) -> i64; + } + unsafe { _svcntp_b32(pg.sve_into(), op.sve_into()).as_unsigned() } +} +#[doc = "Count set predicate bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntp_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cntp))] +pub fn svcntp_b64(pg: svbool_t, op: svbool_t) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntp.nxv2i1")] + fn _svcntp_b64(pg: svbool2_t, op: svbool2_t) -> i64; + } + unsafe { _svcntp_b64(pg.sve_into(), op.sve_into()).as_unsigned() } +} +#[doc = "Shuffle active elements of vector to the right and fill with zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(compact))] +pub fn svcompact_f32(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.compact.nxv4f32" + )] + fn _svcompact_f32(pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svcompact_f32(pg.sve_into(), op) } +} +#[doc = "Shuffle active elements of vector to the right and fill with zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(compact))] +pub fn svcompact_f64(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.compact.nxv2f64" + )] + fn _svcompact_f64(pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svcompact_f64(pg.sve_into(), op) } +} +#[doc = "Shuffle active elements of vector to the right and fill with zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(compact))] +pub fn svcompact_s32(pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.compact.nxv4i32" + )] + fn _svcompact_s32(pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svcompact_s32(pg.sve_into(), op) } +} +#[doc = "Shuffle active elements of vector to the right and fill with zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(compact))] +pub fn svcompact_s64(pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.compact.nxv2i64" + )] + fn _svcompact_s64(pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svcompact_s64(pg.sve_into(), op) } +} +#[doc = "Shuffle active elements of vector to the right and fill with zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(compact))] +pub fn svcompact_u32(pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe { svcompact_s32(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Shuffle active elements of vector to the right and fill with zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(compact))] +pub fn svcompact_u64(pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe { svcompact_s64(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Create a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate2_f32(x0: svfloat32_t, x1: svfloat32_t) -> svfloat32x2_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create2(x0, x1) } +} +#[doc = "Create a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate2_f64(x0: svfloat64_t, x1: svfloat64_t) -> svfloat64x2_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create2(x0, x1) } +} +#[doc = "Create a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate2_s8(x0: svint8_t, x1: svint8_t) -> svint8x2_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create2(x0, x1) } +} +#[doc = "Create a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate2_s16(x0: svint16_t, x1: svint16_t) -> svint16x2_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create2(x0, x1) } +} +#[doc = "Create a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate2_s32(x0: svint32_t, x1: svint32_t) -> svint32x2_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create2(x0, x1) } +} +#[doc = "Create a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate2_s64(x0: svint64_t, x1: svint64_t) -> svint64x2_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create2(x0, x1) } +} +#[doc = "Create a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate2_u8(x0: svuint8_t, x1: svuint8_t) -> svuint8x2_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create2(x0, x1) } +} +#[doc = "Create a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate2_u16(x0: svuint16_t, x1: svuint16_t) -> svuint16x2_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create2(x0, x1) } +} +#[doc = "Create a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate2_u32(x0: svuint32_t, x1: svuint32_t) -> svuint32x2_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create2(x0, x1) } +} +#[doc = "Create a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate2_u64(x0: svuint64_t, x1: svuint64_t) -> svuint64x2_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create2(x0, x1) } +} +#[doc = "Create a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate3_f32(x0: svfloat32_t, x1: svfloat32_t, x2: svfloat32_t) -> svfloat32x3_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create3(x0, x1, x2) } +} +#[doc = "Create a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate3_f64(x0: svfloat64_t, x1: svfloat64_t, x2: svfloat64_t) -> svfloat64x3_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create3(x0, x1, x2) } +} +#[doc = "Create a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate3_s8(x0: svint8_t, x1: svint8_t, x2: svint8_t) -> svint8x3_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create3(x0, x1, x2) } +} +#[doc = "Create a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate3_s16(x0: svint16_t, x1: svint16_t, x2: svint16_t) -> svint16x3_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create3(x0, x1, x2) } +} +#[doc = "Create a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate3_s32(x0: svint32_t, x1: svint32_t, x2: svint32_t) -> svint32x3_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create3(x0, x1, x2) } +} +#[doc = "Create a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate3_s64(x0: svint64_t, x1: svint64_t, x2: svint64_t) -> svint64x3_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create3(x0, x1, x2) } +} +#[doc = "Create a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate3_u8(x0: svuint8_t, x1: svuint8_t, x2: svuint8_t) -> svuint8x3_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create3(x0, x1, x2) } +} +#[doc = "Create a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate3_u16(x0: svuint16_t, x1: svuint16_t, x2: svuint16_t) -> svuint16x3_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create3(x0, x1, x2) } +} +#[doc = "Create a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate3_u32(x0: svuint32_t, x1: svuint32_t, x2: svuint32_t) -> svuint32x3_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create3(x0, x1, x2) } +} +#[doc = "Create a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate3_u64(x0: svuint64_t, x1: svuint64_t, x2: svuint64_t) -> svuint64x3_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create3(x0, x1, x2) } +} +#[doc = "Create a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate4_f32( + x0: svfloat32_t, + x1: svfloat32_t, + x2: svfloat32_t, + x3: svfloat32_t, +) -> svfloat32x4_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create4(x0, x1, x2, x3) } +} +#[doc = "Create a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate4_f64( + x0: svfloat64_t, + x1: svfloat64_t, + x2: svfloat64_t, + x3: svfloat64_t, +) -> svfloat64x4_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create4(x0, x1, x2, x3) } +} +#[doc = "Create a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate4_s8(x0: svint8_t, x1: svint8_t, x2: svint8_t, x3: svint8_t) -> svint8x4_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create4(x0, x1, x2, x3) } +} +#[doc = "Create a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate4_s16(x0: svint16_t, x1: svint16_t, x2: svint16_t, x3: svint16_t) -> svint16x4_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create4(x0, x1, x2, x3) } +} +#[doc = "Create a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate4_s32(x0: svint32_t, x1: svint32_t, x2: svint32_t, x3: svint32_t) -> svint32x4_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create4(x0, x1, x2, x3) } +} +#[doc = "Create a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate4_s64(x0: svint64_t, x1: svint64_t, x2: svint64_t, x3: svint64_t) -> svint64x4_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create4(x0, x1, x2, x3) } +} +#[doc = "Create a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate4_u8(x0: svuint8_t, x1: svuint8_t, x2: svuint8_t, x3: svuint8_t) -> svuint8x4_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create4(x0, x1, x2, x3) } +} +#[doc = "Create a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate4_u16( + x0: svuint16_t, + x1: svuint16_t, + x2: svuint16_t, + x3: svuint16_t, +) -> svuint16x4_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create4(x0, x1, x2, x3) } +} +#[doc = "Create a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate4_u32( + x0: svuint32_t, + x1: svuint32_t, + x2: svuint32_t, + x3: svuint32_t, +) -> svuint32x4_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create4(x0, x1, x2, x3) } +} +#[doc = "Create a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svcreate4_u64( + x0: svuint64_t, + x1: svuint64_t, + x2: svuint64_t, + x3: svuint64_t, +) -> svuint64x4_t { + unsafe { crate::intrinsics::simd::scalable::sve_tuple_create4(x0, x1, x2, x3) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvt))] +pub fn svcvt_f32_f64_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat64_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvt.f32f64")] + fn _svcvt_f32_f64_m(inactive: svfloat32_t, pg: svbool2_t, op: svfloat64_t) -> svfloat32_t; + } + unsafe { _svcvt_f32_f64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvt))] +pub fn svcvt_f32_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat32_t { + unsafe { svcvt_f32_f64_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvt))] +pub fn svcvt_f32_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat32_t { + svcvt_f32_f64_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvt))] +pub fn svcvt_f64_f32_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat32_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvt.f64f32")] + fn _svcvt_f64_f32_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat32_t) -> svfloat64_t; + } + unsafe { _svcvt_f64_f32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvt))] +pub fn svcvt_f64_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat64_t { + unsafe { svcvt_f64_f32_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvt))] +pub fn svcvt_f64_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat64_t { + svcvt_f64_f32_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f32_s32_m(inactive: svfloat32_t, pg: svbool_t, op: svint32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.scvtf.f32i32")] + fn _svcvt_f32_s32_m(inactive: svfloat32_t, pg: svbool4_t, op: svint32_t) -> svfloat32_t; + } + unsafe { _svcvt_f32_s32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f32_s32_x(pg: svbool_t, op: svint32_t) -> svfloat32_t { + unsafe { svcvt_f32_s32_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f32_s32_z(pg: svbool_t, op: svint32_t) -> svfloat32_t { + svcvt_f32_s32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f32_s64_m(inactive: svfloat32_t, pg: svbool_t, op: svint64_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.scvtf.f32i64")] + fn _svcvt_f32_s64_m(inactive: svfloat32_t, pg: svbool2_t, op: svint64_t) -> svfloat32_t; + } + unsafe { _svcvt_f32_s64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f32_s64_x(pg: svbool_t, op: svint64_t) -> svfloat32_t { + unsafe { svcvt_f32_s64_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f32_s64_z(pg: svbool_t, op: svint64_t) -> svfloat32_t { + svcvt_f32_s64_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f32_u32_m(inactive: svfloat32_t, pg: svbool_t, op: svuint32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ucvtf.f32i32")] + fn _svcvt_f32_u32_m(inactive: svfloat32_t, pg: svbool4_t, op: svint32_t) -> svfloat32_t; + } + unsafe { _svcvt_f32_u32_m(inactive, pg.sve_into(), op.as_signed()) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f32_u32_x(pg: svbool_t, op: svuint32_t) -> svfloat32_t { + unsafe { svcvt_f32_u32_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f32_u32_z(pg: svbool_t, op: svuint32_t) -> svfloat32_t { + svcvt_f32_u32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f32_u64_m(inactive: svfloat32_t, pg: svbool_t, op: svuint64_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ucvtf.f32i64")] + fn _svcvt_f32_u64_m(inactive: svfloat32_t, pg: svbool2_t, op: svint64_t) -> svfloat32_t; + } + unsafe { _svcvt_f32_u64_m(inactive, pg.sve_into(), op.as_signed()) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f32_u64_x(pg: svbool_t, op: svuint64_t) -> svfloat32_t { + unsafe { svcvt_f32_u64_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f32_u64_z(pg: svbool_t, op: svuint64_t) -> svfloat32_t { + svcvt_f32_u64_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f64_s32_m(inactive: svfloat64_t, pg: svbool_t, op: svint32_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.scvtf.f64i32")] + fn _svcvt_f64_s32_m(inactive: svfloat64_t, pg: svbool2_t, op: svint32_t) -> svfloat64_t; + } + unsafe { _svcvt_f64_s32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f64_s32_x(pg: svbool_t, op: svint32_t) -> svfloat64_t { + unsafe { svcvt_f64_s32_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f64_s32_z(pg: svbool_t, op: svint32_t) -> svfloat64_t { + svcvt_f64_s32_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f64_s64_m(inactive: svfloat64_t, pg: svbool_t, op: svint64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.scvtf.f64i64")] + fn _svcvt_f64_s64_m(inactive: svfloat64_t, pg: svbool2_t, op: svint64_t) -> svfloat64_t; + } + unsafe { _svcvt_f64_s64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f64_s64_x(pg: svbool_t, op: svint64_t) -> svfloat64_t { + unsafe { svcvt_f64_s64_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(scvtf))] +pub fn svcvt_f64_s64_z(pg: svbool_t, op: svint64_t) -> svfloat64_t { + svcvt_f64_s64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f64_u32_m(inactive: svfloat64_t, pg: svbool_t, op: svuint32_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ucvtf.f64i32")] + fn _svcvt_f64_u32_m(inactive: svfloat64_t, pg: svbool2_t, op: svint32_t) -> svfloat64_t; + } + unsafe { _svcvt_f64_u32_m(inactive, pg.sve_into(), op.as_signed()) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f64_u32_x(pg: svbool_t, op: svuint32_t) -> svfloat64_t { + unsafe { svcvt_f64_u32_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f64_u32_z(pg: svbool_t, op: svuint32_t) -> svfloat64_t { + svcvt_f64_u32_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f64_u64_m(inactive: svfloat64_t, pg: svbool_t, op: svuint64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ucvtf.f64i64")] + fn _svcvt_f64_u64_m(inactive: svfloat64_t, pg: svbool2_t, op: svint64_t) -> svfloat64_t; + } + unsafe { _svcvt_f64_u64_m(inactive, pg.sve_into(), op.as_signed()) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f64_u64_x(pg: svbool_t, op: svuint64_t) -> svfloat64_t { + unsafe { svcvt_f64_u64_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ucvtf))] +pub fn svcvt_f64_u64_z(pg: svbool_t, op: svuint64_t) -> svfloat64_t { + svcvt_f64_u64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s32[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s32_f32_m(inactive: svint32_t, pg: svbool_t, op: svfloat32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtzs.i32f32")] + fn _svcvt_s32_f32_m(inactive: svint32_t, pg: svbool4_t, op: svfloat32_t) -> svint32_t; + } + unsafe { _svcvt_s32_f32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s32[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s32_f32_x(pg: svbool_t, op: svfloat32_t) -> svint32_t { + unsafe { svcvt_s32_f32_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s32[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s32_f32_z(pg: svbool_t, op: svfloat32_t) -> svint32_t { + svcvt_s32_f32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s32[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s32_f64_m(inactive: svint32_t, pg: svbool_t, op: svfloat64_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtzs.i32f64")] + fn _svcvt_s32_f64_m(inactive: svint32_t, pg: svbool2_t, op: svfloat64_t) -> svint32_t; + } + unsafe { _svcvt_s32_f64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s32[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s32_f64_x(pg: svbool_t, op: svfloat64_t) -> svint32_t { + unsafe { svcvt_s32_f64_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s32[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s32_f64_z(pg: svbool_t, op: svfloat64_t) -> svint32_t { + svcvt_s32_f64_m(svdup_n_s32(0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s64[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s64_f32_m(inactive: svint64_t, pg: svbool_t, op: svfloat32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtzs.i64f32")] + fn _svcvt_s64_f32_m(inactive: svint64_t, pg: svbool2_t, op: svfloat32_t) -> svint64_t; + } + unsafe { _svcvt_s64_f32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s64[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s64_f32_x(pg: svbool_t, op: svfloat32_t) -> svint64_t { + unsafe { svcvt_s64_f32_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s64[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s64_f32_z(pg: svbool_t, op: svfloat32_t) -> svint64_t { + svcvt_s64_f32_m(svdup_n_s64(0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s64[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s64_f64_m(inactive: svint64_t, pg: svbool_t, op: svfloat64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtzs.i64f64")] + fn _svcvt_s64_f64_m(inactive: svint64_t, pg: svbool2_t, op: svfloat64_t) -> svint64_t; + } + unsafe { _svcvt_s64_f64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s64[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s64_f64_x(pg: svbool_t, op: svfloat64_t) -> svint64_t { + unsafe { svcvt_s64_f64_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s64[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzs))] +pub fn svcvt_s64_f64_z(pg: svbool_t, op: svfloat64_t) -> svint64_t { + svcvt_s64_f64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u32[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzu))] +pub fn svcvt_u32_f32_m(inactive: svuint32_t, pg: svbool_t, op: svfloat32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtzu.i32f32")] + fn _svcvt_u32_f32_m(inactive: svint32_t, pg: svbool4_t, op: svfloat32_t) -> svint32_t; + } + unsafe { _svcvt_u32_f32_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u32[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzu))] +pub fn svcvt_u32_f32_x(pg: svbool_t, op: svfloat32_t) -> svuint32_t { + unsafe { svcvt_u32_f32_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u32[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzu))] +pub fn svcvt_u32_f32_z(pg: svbool_t, op: svfloat32_t) -> svuint32_t { + svcvt_u32_f32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u32[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzu))] +pub fn svcvt_u32_f64_m(inactive: svuint32_t, pg: svbool_t, op: svfloat64_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtzu.i32f64")] + fn _svcvt_u32_f64_m(inactive: svint32_t, pg: svbool2_t, op: svfloat64_t) -> svint32_t; + } + unsafe { _svcvt_u32_f64_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u32[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzu))] +pub fn svcvt_u32_f64_x(pg: svbool_t, op: svfloat64_t) -> svuint32_t { + unsafe { svcvt_u32_f64_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u32[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzu))] +pub fn svcvt_u32_f64_z(pg: svbool_t, op: svfloat64_t) -> svuint32_t { + svcvt_u32_f64_m(svdup_n_u32(0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u64[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzu))] +pub fn svcvt_u64_f32_m(inactive: svuint64_t, pg: svbool_t, op: svfloat32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtzu.i64f32")] + fn _svcvt_u64_f32_m(inactive: svint64_t, pg: svbool2_t, op: svfloat32_t) -> svint64_t; + } + unsafe { _svcvt_u64_f32_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u64[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzu))] +pub fn svcvt_u64_f32_x(pg: svbool_t, op: svfloat32_t) -> svuint64_t { + unsafe { svcvt_u64_f32_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u64[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzu))] +pub fn svcvt_u64_f32_z(pg: svbool_t, op: svfloat32_t) -> svuint64_t { + svcvt_u64_f32_m(svdup_n_u64(0), pg, op) +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u64[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzu))] +pub fn svcvt_u64_f64_m(inactive: svuint64_t, pg: svbool_t, op: svfloat64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtzu.i64f64")] + fn _svcvt_u64_f64_m(inactive: svint64_t, pg: svbool2_t, op: svfloat64_t) -> svint64_t; + } + unsafe { _svcvt_u64_f64_m(inactive.as_signed(), pg.sve_into(), op).as_unsigned() } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u64[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzu))] +pub fn svcvt_u64_f64_x(pg: svbool_t, op: svfloat64_t) -> svuint64_t { + unsafe { svcvt_u64_f64_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Floating-point convert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u64[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtzu))] +pub fn svcvt_u64_f64_z(pg: svbool_t, op: svfloat64_t) -> svuint64_t { + svcvt_u64_f64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fdiv))] +pub fn svdiv_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fdiv.nxv4f32")] + fn _svdiv_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svdiv_f32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fdiv))] +pub fn svdiv_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svdiv_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fdiv))] +pub fn svdiv_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svdiv_f32_m(pg, op1, op2) +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fdiv))] +pub fn svdiv_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svdiv_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fdiv))] +pub fn svdiv_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svdiv_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fdiv))] +pub fn svdiv_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svdiv_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fdiv))] +pub fn svdiv_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fdiv.nxv2f64")] + fn _svdiv_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svdiv_f64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fdiv))] +pub fn svdiv_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svdiv_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fdiv))] +pub fn svdiv_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svdiv_f64_m(pg, op1, op2) +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fdiv))] +pub fn svdiv_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svdiv_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fdiv))] +pub fn svdiv_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svdiv_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fdiv))] +pub fn svdiv_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svdiv_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sdiv.nxv4i32")] + fn _svdiv_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svdiv_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svdiv_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svdiv_s32_m(pg, op1, op2) +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svdiv_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svdiv_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svdiv_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sdiv.nxv2i64")] + fn _svdiv_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svdiv_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svdiv_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svdiv_s64_m(pg, op1, op2) +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svdiv_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svdiv_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdiv))] +pub fn svdiv_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svdiv_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udiv))] +pub fn svdiv_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.udiv.nxv4i32")] + fn _svdiv_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svdiv_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udiv))] +pub fn svdiv_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svdiv_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udiv))] +pub fn svdiv_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svdiv_u32_m(pg, op1, op2) +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udiv))] +pub fn svdiv_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svdiv_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udiv))] +pub fn svdiv_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svdiv_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udiv))] +pub fn svdiv_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svdiv_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udiv))] +pub fn svdiv_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.udiv.nxv2i64")] + fn _svdiv_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svdiv_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udiv))] +pub fn svdiv_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svdiv_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udiv))] +pub fn svdiv_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svdiv_u64_m(pg, op1, op2) +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udiv))] +pub fn svdiv_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svdiv_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udiv))] +pub fn svdiv_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svdiv_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Divide"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udiv))] +pub fn svdiv_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svdiv_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fdivr))] +pub fn svdivr_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fdivr.nxv4f32")] + fn _svdivr_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svdivr_f32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fdivr))] +pub fn svdivr_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svdivr_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fdivr))] +pub fn svdivr_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svdivr_f32_m(pg, op1, op2) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fdivr))] +pub fn svdivr_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svdivr_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fdivr))] +pub fn svdivr_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svdivr_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fdivr))] +pub fn svdivr_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svdivr_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fdivr))] +pub fn svdivr_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fdivr.nxv2f64")] + fn _svdivr_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svdivr_f64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fdivr))] +pub fn svdivr_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svdivr_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fdivr))] +pub fn svdivr_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svdivr_f64_m(pg, op1, op2) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fdivr))] +pub fn svdivr_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svdivr_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fdivr))] +pub fn svdivr_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svdivr_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fdivr))] +pub fn svdivr_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svdivr_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdivr))] +pub fn svdivr_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sdivr.nxv4i32")] + fn _svdivr_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svdivr_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdivr))] +pub fn svdivr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svdivr_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdivr))] +pub fn svdivr_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svdivr_s32_m(pg, op1, op2) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdivr))] +pub fn svdivr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svdivr_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdivr))] +pub fn svdivr_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svdivr_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdivr))] +pub fn svdivr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svdivr_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdivr))] +pub fn svdivr_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sdivr.nxv2i64")] + fn _svdivr_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svdivr_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdivr))] +pub fn svdivr_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svdivr_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdivr))] +pub fn svdivr_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svdivr_s64_m(pg, op1, op2) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdivr))] +pub fn svdivr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svdivr_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdivr))] +pub fn svdivr_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svdivr_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdivr))] +pub fn svdivr_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svdivr_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udivr))] +pub fn svdivr_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.udivr.nxv4i32")] + fn _svdivr_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svdivr_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udivr))] +pub fn svdivr_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svdivr_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udivr))] +pub fn svdivr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svdivr_u32_m(pg, op1, op2) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udivr))] +pub fn svdivr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svdivr_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udivr))] +pub fn svdivr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svdivr_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udivr))] +pub fn svdivr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svdivr_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udivr))] +pub fn svdivr_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.udivr.nxv2i64")] + fn _svdivr_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svdivr_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udivr))] +pub fn svdivr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svdivr_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udivr))] +pub fn svdivr_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svdivr_u64_m(pg, op1, op2) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udivr))] +pub fn svdivr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svdivr_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udivr))] +pub fn svdivr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svdivr_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Divide reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udivr))] +pub fn svdivr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svdivr_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Dot product"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdot, IMM_INDEX = 0))] +pub fn svdot_lane_s32( + op1: svint32_t, + op2: svint8_t, + op3: svint8_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sdot.lane.nxv4i32" + )] + fn _svdot_lane_s32( + op1: svint32_t, + op2: svint8_t, + op3: svint8_t, + imm_index: i32, + ) -> svint32_t; + } + unsafe { _svdot_lane_s32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Dot product"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot_lane[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdot, IMM_INDEX = 0))] +pub fn svdot_lane_s64( + op1: svint64_t, + op2: svint16_t, + op3: svint16_t, +) -> svint64_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sdot.lane.nxv2i64" + )] + fn _svdot_lane_s64( + op1: svint64_t, + op2: svint16_t, + op3: svint16_t, + imm_index: i32, + ) -> svint64_t; + } + unsafe { _svdot_lane_s64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Dot product"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot_lane[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udot, IMM_INDEX = 0))] +pub fn svdot_lane_u32( + op1: svuint32_t, + op2: svuint8_t, + op3: svuint8_t, +) -> svuint32_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.udot.lane.nxv4i32" + )] + fn _svdot_lane_u32( + op1: svint32_t, + op2: svint8_t, + op3: svint8_t, + imm_index: i32, + ) -> svint32_t; + } + unsafe { + _svdot_lane_u32(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX).as_unsigned() + } +} +#[doc = "Dot product"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot_lane[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udot, IMM_INDEX = 0))] +pub fn svdot_lane_u64( + op1: svuint64_t, + op2: svuint16_t, + op3: svuint16_t, +) -> svuint64_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.udot.lane.nxv2i64" + )] + fn _svdot_lane_u64( + op1: svint64_t, + op2: svint16_t, + op3: svint16_t, + imm_index: i32, + ) -> svint64_t; + } + unsafe { + _svdot_lane_u64(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX).as_unsigned() + } +} +#[doc = "Dot product"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdot))] +pub fn svdot_s32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sdot.nxv4i32")] + fn _svdot_s32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t; + } + unsafe { _svdot_s32(op1, op2, op3) } +} +#[doc = "Dot product"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdot))] +pub fn svdot_n_s32(op1: svint32_t, op2: svint8_t, op3: i8) -> svint32_t { + svdot_s32(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Dot product"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdot))] +pub fn svdot_s64(op1: svint64_t, op2: svint16_t, op3: svint16_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sdot.nxv2i64")] + fn _svdot_s64(op1: svint64_t, op2: svint16_t, op3: svint16_t) -> svint64_t; + } + unsafe { _svdot_s64(op1, op2, op3) } +} +#[doc = "Dot product"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sdot))] +pub fn svdot_n_s64(op1: svint64_t, op2: svint16_t, op3: i16) -> svint64_t { + svdot_s64(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Dot product"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udot))] +pub fn svdot_u32(op1: svuint32_t, op2: svuint8_t, op3: svuint8_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.udot.nxv4i32")] + fn _svdot_u32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t; + } + unsafe { _svdot_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Dot product"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udot))] +pub fn svdot_n_u32(op1: svuint32_t, op2: svuint8_t, op3: u8) -> svuint32_t { + svdot_u32(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Dot product"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udot))] +pub fn svdot_u64(op1: svuint64_t, op2: svuint16_t, op3: svuint16_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.udot.nxv2i64")] + fn _svdot_u64(op1: svint64_t, op2: svint16_t, op3: svint16_t) -> svint64_t; + } + unsafe { _svdot_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Dot product"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(udot))] +pub fn svdot_n_u64(op1: svuint64_t, op2: svuint16_t, op3: u16) -> svuint64_t { + svdot_u64(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdup_lane_f32(data: svfloat32_t, index: u32) -> svfloat32_t { + svtbl_f32(data, svdup_n_u32(index)) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdup_lane_f64(data: svfloat64_t, index: u64) -> svfloat64_t { + svtbl_f64(data, svdup_n_u64(index)) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdup_lane_s8(data: svint8_t, index: u8) -> svint8_t { + svtbl_s8(data, svdup_n_u8(index)) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdup_lane_s16(data: svint16_t, index: u16) -> svint16_t { + svtbl_s16(data, svdup_n_u16(index)) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdup_lane_s32(data: svint32_t, index: u32) -> svint32_t { + svtbl_s32(data, svdup_n_u32(index)) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdup_lane_s64(data: svint64_t, index: u64) -> svint64_t { + svtbl_s64(data, svdup_n_u64(index)) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdup_lane_u8(data: svuint8_t, index: u8) -> svuint8_t { + svtbl_u8(data, svdup_n_u8(index)) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdup_lane_u16(data: svuint16_t, index: u16) -> svuint16_t { + svtbl_u16(data, svdup_n_u16(index)) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdup_lane_u32(data: svuint32_t, index: u32) -> svuint32_t { + svtbl_u32(data, svdup_n_u32(index)) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdup_lane_u64(data: svuint64_t, index: u64) -> svuint64_t { + svtbl_u64(data, svdup_n_u64(index)) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_b8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sbfx))] +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svdup_n_b8(op: bool) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv16i1")] + fn _svdup_n_b8(op: bool) -> svbool_t; + } + unsafe { _svdup_n_b8(op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sbfx))] +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svdup_n_b16(op: bool) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv8i1")] + fn _svdup_n_b16(op: bool) -> svbool8_t; + } + unsafe { _svdup_n_b16(op).sve_into() } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sbfx))] +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svdup_n_b32(op: bool) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv4i1")] + fn _svdup_n_b32(op: bool) -> svbool4_t; + } + unsafe { _svdup_n_b32(op).sve_into() } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sbfx))] +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svdup_n_b64(op: bool) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv2i1")] + fn _svdup_n_b64(op: bool) -> svbool2_t; + } + unsafe { _svdup_n_b64(op).sve_into() } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_f32(op: f32) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv4f32")] + fn _svdup_n_f32(op: f32) -> svfloat32_t; + } + unsafe { _svdup_n_f32(op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_f64(op: f64) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv2f64")] + fn _svdup_n_f64(op: f64) -> svfloat64_t; + } + unsafe { _svdup_n_f64(op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s8(op: i8) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv16i8")] + fn _svdup_n_s8(op: i8) -> svint8_t; + } + unsafe { _svdup_n_s8(op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s16(op: i16) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv8i16")] + fn _svdup_n_s16(op: i16) -> svint16_t; + } + unsafe { _svdup_n_s16(op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s32(op: i32) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv4i32")] + fn _svdup_n_s32(op: i32) -> svint32_t; + } + unsafe { _svdup_n_s32(op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s64(op: i64) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv2i64")] + fn _svdup_n_s64(op: i64) -> svint64_t; + } + unsafe { _svdup_n_s64(op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u8(op: u8) -> svuint8_t { + unsafe { svdup_n_s8(op.as_signed()).as_unsigned() } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u16(op: u16) -> svuint16_t { + unsafe { svdup_n_s16(op.as_signed()).as_unsigned() } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u32(op: u32) -> svuint32_t { + unsafe { svdup_n_s32(op.as_signed()).as_unsigned() } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u64(op: u64) -> svuint64_t { + unsafe { svdup_n_s64(op.as_signed()).as_unsigned() } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f32_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_f32_m(inactive: svfloat32_t, pg: svbool_t, op: f32) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.nxv4f32")] + fn _svdup_n_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: f32) -> svfloat32_t; + } + unsafe { _svdup_n_f32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f32_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_f32_x(pg: svbool_t, op: f32) -> svfloat32_t { + svdup_n_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f32_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_f32_z(pg: svbool_t, op: f32) -> svfloat32_t { + svdup_n_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f64_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_f64_m(inactive: svfloat64_t, pg: svbool_t, op: f64) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.nxv2f64")] + fn _svdup_n_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: f64) -> svfloat64_t; + } + unsafe { _svdup_n_f64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f64_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_f64_x(pg: svbool_t, op: f64) -> svfloat64_t { + svdup_n_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f64_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_f64_z(pg: svbool_t, op: f64) -> svfloat64_t { + svdup_n_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s8_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s8_m(inactive: svint8_t, pg: svbool_t, op: i8) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.nxv16i8")] + fn _svdup_n_s8_m(inactive: svint8_t, pg: svbool_t, op: i8) -> svint8_t; + } + unsafe { _svdup_n_s8_m(inactive, pg, op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s8_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s8_x(pg: svbool_t, op: i8) -> svint8_t { + svdup_n_s8_m(svdup_n_s8(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s8_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s8_z(pg: svbool_t, op: i8) -> svint8_t { + svdup_n_s8_m(svdup_n_s8(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s16_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s16_m(inactive: svint16_t, pg: svbool_t, op: i16) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.nxv8i16")] + fn _svdup_n_s16_m(inactive: svint16_t, pg: svbool8_t, op: i16) -> svint16_t; + } + unsafe { _svdup_n_s16_m(inactive, pg.sve_into(), op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s16_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s16_x(pg: svbool_t, op: i16) -> svint16_t { + svdup_n_s16_m(svdup_n_s16(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s16_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s16_z(pg: svbool_t, op: i16) -> svint16_t { + svdup_n_s16_m(svdup_n_s16(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s32_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s32_m(inactive: svint32_t, pg: svbool_t, op: i32) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.nxv4i32")] + fn _svdup_n_s32_m(inactive: svint32_t, pg: svbool4_t, op: i32) -> svint32_t; + } + unsafe { _svdup_n_s32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s32_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s32_x(pg: svbool_t, op: i32) -> svint32_t { + svdup_n_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s32_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s32_z(pg: svbool_t, op: i32) -> svint32_t { + svdup_n_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s64_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s64_m(inactive: svint64_t, pg: svbool_t, op: i64) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.nxv2i64")] + fn _svdup_n_s64_m(inactive: svint64_t, pg: svbool2_t, op: i64) -> svint64_t; + } + unsafe { _svdup_n_s64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s64_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s64_x(pg: svbool_t, op: i64) -> svint64_t { + svdup_n_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s64_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_s64_z(pg: svbool_t, op: i64) -> svint64_t { + svdup_n_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u8_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u8_m(inactive: svuint8_t, pg: svbool_t, op: u8) -> svuint8_t { + unsafe { svdup_n_s8_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u8_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u8_x(pg: svbool_t, op: u8) -> svuint8_t { + svdup_n_u8_m(svdup_n_u8(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u8_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u8_z(pg: svbool_t, op: u8) -> svuint8_t { + svdup_n_u8_m(svdup_n_u8(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u16_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u16_m(inactive: svuint16_t, pg: svbool_t, op: u16) -> svuint16_t { + unsafe { svdup_n_s16_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u16_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u16_x(pg: svbool_t, op: u16) -> svuint16_t { + svdup_n_u16_m(svdup_n_u16(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u16_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u16_z(pg: svbool_t, op: u16) -> svuint16_t { + svdup_n_u16_m(svdup_n_u16(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u32_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u32_m(inactive: svuint32_t, pg: svbool_t, op: u32) -> svuint32_t { + unsafe { svdup_n_s32_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u32_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u32_x(pg: svbool_t, op: u32) -> svuint32_t { + svdup_n_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u32_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u32_z(pg: svbool_t, op: u32) -> svuint32_t { + svdup_n_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u64_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u64_m(inactive: svuint64_t, pg: svbool_t, op: u64) -> svuint64_t { + unsafe { svdup_n_s64_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u64_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u64_x(pg: svbool_t, op: u64) -> svuint64_t { + svdup_n_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Broadcast a scalar value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u64_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svdup_n_u64_z(pg: svbool_t, op: u64) -> svuint64_t { + svdup_n_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdupq_lane_f32(data: svfloat32_t, index: u64) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.dupq.lane.nxv4f32" + )] + fn _svdupq_lane_f32(data: svfloat32_t, index: i64) -> svfloat32_t; + } + unsafe { _svdupq_lane_f32(data, index.as_signed()) } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdupq_lane_f64(data: svfloat64_t, index: u64) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.dupq.lane.nxv2f64" + )] + fn _svdupq_lane_f64(data: svfloat64_t, index: i64) -> svfloat64_t; + } + unsafe { _svdupq_lane_f64(data, index.as_signed()) } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdupq_lane_s8(data: svint8_t, index: u64) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.dupq.lane.nxv16i8" + )] + fn _svdupq_lane_s8(data: svint8_t, index: i64) -> svint8_t; + } + unsafe { _svdupq_lane_s8(data, index.as_signed()) } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdupq_lane_s16(data: svint16_t, index: u64) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.dupq.lane.nxv8i16" + )] + fn _svdupq_lane_s16(data: svint16_t, index: i64) -> svint16_t; + } + unsafe { _svdupq_lane_s16(data, index.as_signed()) } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdupq_lane_s32(data: svint32_t, index: u64) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.dupq.lane.nxv4i32" + )] + fn _svdupq_lane_s32(data: svint32_t, index: i64) -> svint32_t; + } + unsafe { _svdupq_lane_s32(data, index.as_signed()) } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdupq_lane_s64(data: svint64_t, index: u64) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.dupq.lane.nxv2i64" + )] + fn _svdupq_lane_s64(data: svint64_t, index: i64) -> svint64_t; + } + unsafe { _svdupq_lane_s64(data, index.as_signed()) } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdupq_lane_u8(data: svuint8_t, index: u64) -> svuint8_t { + unsafe { svdupq_lane_s8(data.as_signed(), index).as_unsigned() } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdupq_lane_u16(data: svuint16_t, index: u64) -> svuint16_t { + unsafe { svdupq_lane_s16(data.as_signed(), index).as_unsigned() } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdupq_lane_u32(data: svuint32_t, index: u64) -> svuint32_t { + unsafe { svdupq_lane_s32(data.as_signed(), index).as_unsigned() } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svdupq_lane_u64(data: svuint64_t, index: u64) -> svuint64_t { + unsafe { svdupq_lane_s64(data.as_signed(), index).as_unsigned() } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svdupq_n_b16( + x0: bool, + x1: bool, + x2: bool, + x3: bool, + x4: bool, + x5: bool, + x6: bool, + x7: bool, +) -> svbool_t { + let op1 = svdupq_n_s16( + x0 as i16, x1 as i16, x2 as i16, x3 as i16, x4 as i16, x5 as i16, x6 as i16, x7 as i16, + ); + svcmpne_wide_s16(svptrue_b16(), op1, svdup_n_s64(0)) +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svdupq_n_b32(x0: bool, x1: bool, x2: bool, x3: bool) -> svbool_t { + let op1 = svdupq_n_s32(x0 as i32, x1 as i32, x2 as i32, x3 as i32); + svcmpne_wide_s32(svptrue_b32(), op1, svdup_n_s64(0)) +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svdupq_n_b64(x0: bool, x1: bool) -> svbool_t { + let op1 = svdupq_n_s64(x0 as i64, x1 as i64); + svcmpne_s64(svptrue_b64(), op1, svdup_n_s64(0)) +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_b8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svdupq_n_b8( + x0: bool, + x1: bool, + x2: bool, + x3: bool, + x4: bool, + x5: bool, + x6: bool, + x7: bool, + x8: bool, + x9: bool, + x10: bool, + x11: bool, + x12: bool, + x13: bool, + x14: bool, + x15: bool, +) -> svbool_t { + let op1 = svdupq_n_s8( + x0 as i8, x1 as i8, x2 as i8, x3 as i8, x4 as i8, x5 as i8, x6 as i8, x7 as i8, x8 as i8, + x9 as i8, x10 as i8, x11 as i8, x12 as i8, x13 as i8, x14 as i8, x15 as i8, + ); + svcmpne_wide_s8(svptrue_b8(), op1, svdup_n_s64(0)) +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_f32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svdupq_n_f32(x0: f32, x1: f32, x2: f32, x3: f32) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.experimental.vector.insert.nxv4f32.v4f32" + )] + fn _svdupq_n_f32(op0: svfloat32_t, op1: float32x4_t, idx: i64) -> svfloat32_t; + } + unsafe { + let op = _svdupq_n_f32(svundef_f32(), crate::mem::transmute([x0, x1, x2, x3]), 0); + svdupq_lane_f32(op, 0) + } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_s32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svdupq_n_s32(x0: i32, x1: i32, x2: i32, x3: i32) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.experimental.vector.insert.nxv4i32.v4i32" + )] + fn _svdupq_n_s32(op0: svint32_t, op1: int32x4_t, idx: i64) -> svint32_t; + } + unsafe { + let op = _svdupq_n_s32(svundef_s32(), crate::mem::transmute([x0, x1, x2, x3]), 0); + svdupq_lane_s32(op, 0) + } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_u32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svdupq_n_u32(x0: u32, x1: u32, x2: u32, x3: u32) -> svuint32_t { + unsafe { + svdupq_n_s32( + x0.as_signed(), + x1.as_signed(), + x2.as_signed(), + x3.as_signed(), + ) + .as_unsigned() + } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_f64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svdupq_n_f64(x0: f64, x1: f64) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.experimental.vector.insert.nxv2f64.v2f64" + )] + fn _svdupq_n_f64(op0: svfloat64_t, op1: float64x2_t, idx: i64) -> svfloat64_t; + } + unsafe { + let op = _svdupq_n_f64(svundef_f64(), crate::mem::transmute([x0, x1]), 0); + svdupq_lane_f64(op, 0) + } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_s64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svdupq_n_s64(x0: i64, x1: i64) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.experimental.vector.insert.nxv2i64.v2i64" + )] + fn _svdupq_n_s64(op0: svint64_t, op1: int64x2_t, idx: i64) -> svint64_t; + } + unsafe { + let op = _svdupq_n_s64(svundef_s64(), crate::mem::transmute([x0, x1]), 0); + svdupq_lane_s64(op, 0) + } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_u64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svdupq_n_u64(x0: u64, x1: u64) -> svuint64_t { + unsafe { svdupq_n_s64(x0.as_signed(), x1.as_signed()).as_unsigned() } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_s16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svdupq_n_s16( + x0: i16, + x1: i16, + x2: i16, + x3: i16, + x4: i16, + x5: i16, + x6: i16, + x7: i16, +) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.experimental.vector.insert.nxv8i16.v8i16" + )] + fn _svdupq_n_s16(op0: svint16_t, op1: int16x8_t, idx: i64) -> svint16_t; + } + unsafe { + let op = _svdupq_n_s16( + svundef_s16(), + crate::mem::transmute([x0, x1, x2, x3, x4, x5, x6, x7]), + 0, + ); + svdupq_lane_s16(op, 0) + } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_u16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svdupq_n_u16( + x0: u16, + x1: u16, + x2: u16, + x3: u16, + x4: u16, + x5: u16, + x6: u16, + x7: u16, +) -> svuint16_t { + unsafe { + svdupq_n_s16( + x0.as_signed(), + x1.as_signed(), + x2.as_signed(), + x3.as_signed(), + x4.as_signed(), + x5.as_signed(), + x6.as_signed(), + x7.as_signed(), + ) + .as_unsigned() + } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_s8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svdupq_n_s8( + x0: i8, + x1: i8, + x2: i8, + x3: i8, + x4: i8, + x5: i8, + x6: i8, + x7: i8, + x8: i8, + x9: i8, + x10: i8, + x11: i8, + x12: i8, + x13: i8, + x14: i8, + x15: i8, +) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.experimental.vector.insert.nxv16i8.v16i8" + )] + fn _svdupq_n_s8(op0: svint8_t, op1: int8x16_t, idx: i64) -> svint8_t; + } + unsafe { + let op = _svdupq_n_s8( + svundef_s8(), + crate::mem::transmute([ + x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, + ]), + 0, + ); + svdupq_lane_s8(op, 0) + } +} +#[doc = "Broadcast a quadword of scalars"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_u8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svdupq_n_u8( + x0: u8, + x1: u8, + x2: u8, + x3: u8, + x4: u8, + x5: u8, + x6: u8, + x7: u8, + x8: u8, + x9: u8, + x10: u8, + x11: u8, + x12: u8, + x13: u8, + x14: u8, + x15: u8, +) -> svuint8_t { + unsafe { + svdupq_n_s8( + x0.as_signed(), + x1.as_signed(), + x2.as_signed(), + x3.as_signed(), + x4.as_signed(), + x5.as_signed(), + x6.as_signed(), + x7.as_signed(), + x8.as_signed(), + x9.as_signed(), + x10.as_signed(), + x11.as_signed(), + x12.as_signed(), + x13.as_signed(), + x14.as_signed(), + x15.as_signed(), + ) + .as_unsigned() + } +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_b]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor.z.nvx16i1")] + fn _sveor_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _sveor_b_z(pg, op1, op2) } +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor.nxv16i8")] + fn _sveor_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _sveor_s8_m(pg, op1, op2) } +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + sveor_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + sveor_s8_m(pg, op1, op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + sveor_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + sveor_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + sveor_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor.nxv8i16")] + fn _sveor_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _sveor_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + sveor_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + sveor_s16_m(pg, op1, op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + sveor_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + sveor_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + sveor_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor.nxv4i32")] + fn _sveor_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _sveor_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + sveor_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + sveor_s32_m(pg, op1, op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + sveor_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + sveor_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + sveor_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor.nxv2i64")] + fn _sveor_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _sveor_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + sveor_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + sveor_s64_m(pg, op1, op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + sveor_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + sveor_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + sveor_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { sveor_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + sveor_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + sveor_u8_m(pg, op1, op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + sveor_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + sveor_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + sveor_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { sveor_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + sveor_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + sveor_u16_m(pg, op1, op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + sveor_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + sveor_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + sveor_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { sveor_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + sveor_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + sveor_u32_m(pg, op1, op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + sveor_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + sveor_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + sveor_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { sveor_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + sveor_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + sveor_u64_m(pg, op1, op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + sveor_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + sveor_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Bitwise exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor))] +pub fn sveor_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + sveor_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise exclusive OR reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eorv))] +pub fn sveorv_s8(pg: svbool_t, op: svint8_t) -> i8 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorv.nxv16i8")] + fn _sveorv_s8(pg: svbool_t, op: svint8_t) -> i8; + } + unsafe { _sveorv_s8(pg, op) } +} +#[doc = "Bitwise exclusive OR reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eorv))] +pub fn sveorv_s16(pg: svbool_t, op: svint16_t) -> i16 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorv.nxv8i16")] + fn _sveorv_s16(pg: svbool8_t, op: svint16_t) -> i16; + } + unsafe { _sveorv_s16(pg.sve_into(), op) } +} +#[doc = "Bitwise exclusive OR reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eorv))] +pub fn sveorv_s32(pg: svbool_t, op: svint32_t) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorv.nxv4i32")] + fn _sveorv_s32(pg: svbool4_t, op: svint32_t) -> i32; + } + unsafe { _sveorv_s32(pg.sve_into(), op) } +} +#[doc = "Bitwise exclusive OR reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eorv))] +pub fn sveorv_s64(pg: svbool_t, op: svint64_t) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorv.nxv2i64")] + fn _sveorv_s64(pg: svbool2_t, op: svint64_t) -> i64; + } + unsafe { _sveorv_s64(pg.sve_into(), op) } +} +#[doc = "Bitwise exclusive OR reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eorv))] +pub fn sveorv_u8(pg: svbool_t, op: svuint8_t) -> u8 { + unsafe { sveorv_s8(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eorv))] +pub fn sveorv_u16(pg: svbool_t, op: svuint16_t) -> u16 { + unsafe { sveorv_s16(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eorv))] +pub fn sveorv_u32(pg: svbool_t, op: svuint32_t) -> u32 { + unsafe { sveorv_s32(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eorv))] +pub fn sveorv_u64(pg: svbool_t, op: svuint64_t) -> u64 { + unsafe { sveorv_s64(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Floating-point exponential accelerator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexpa[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fexpa))] +pub fn svexpa_f32(op: svuint32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fexpa.x.nxv4f32 " + )] + fn _svexpa_f32(op: svint32_t) -> svfloat32_t; + } + unsafe { _svexpa_f32(op.as_signed()) } +} +#[doc = "Floating-point exponential accelerator"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexpa[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fexpa))] +pub fn svexpa_f64(op: svuint64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fexpa.x.nxv2f64 " + )] + fn _svexpa_f64(op: svint64_t) -> svfloat64_t; + } + unsafe { _svexpa_f64(op.as_signed()) } +} +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ext, IMM3 = 1))] +pub fn svext_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + static_assert_range!(IMM3, 0..=63); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ext.nxv4f32")] + fn _svext_f32(op1: svfloat32_t, op2: svfloat32_t, imm3: i32) -> svfloat32_t; + } + unsafe { _svext_f32(op1, op2, IMM3) } +} +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ext, IMM3 = 1))] +pub fn svext_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + static_assert_range!(IMM3, 0..=31); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ext.nxv2f64")] + fn _svext_f64(op1: svfloat64_t, op2: svfloat64_t, imm3: i32) -> svfloat64_t; + } + unsafe { _svext_f64(op1, op2, IMM3) } +} +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ext, IMM3 = 1))] +pub fn svext_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + static_assert_range!(IMM3, 0..=255); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ext.nxv16i8")] + fn _svext_s8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t; + } + unsafe { _svext_s8(op1, op2, IMM3) } +} +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ext, IMM3 = 1))] +pub fn svext_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + static_assert_range!(IMM3, 0..=127); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ext.nxv8i16")] + fn _svext_s16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t; + } + unsafe { _svext_s16(op1, op2, IMM3) } +} +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ext, IMM3 = 1))] +pub fn svext_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + static_assert_range!(IMM3, 0..=63); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ext.nxv4i32")] + fn _svext_s32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t; + } + unsafe { _svext_s32(op1, op2, IMM3) } +} +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ext, IMM3 = 1))] +pub fn svext_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + static_assert_range!(IMM3, 0..=31); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ext.nxv2i64")] + fn _svext_s64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t; + } + unsafe { _svext_s64(op1, op2, IMM3) } +} +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ext, IMM3 = 1))] +pub fn svext_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + static_assert_range!(IMM3, 0..=255); + unsafe { svext_s8::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ext, IMM3 = 1))] +pub fn svext_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + static_assert_range!(IMM3, 0..=127); + unsafe { svext_s16::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ext, IMM3 = 1))] +pub fn svext_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + static_assert_range!(IMM3, 0..=63); + unsafe { svext_s32::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Extract vector from pair of vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ext, IMM3 = 1))] +pub fn svext_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + static_assert_range!(IMM3, 0..=31); + unsafe { svext_s64::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Sign-extend the low 8 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sxtb))] +pub fn svextb_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sxtb.nxv8i16")] + fn _svextb_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svextb_s16_m(inactive, pg.sve_into(), op) } +} +#[doc = "Sign-extend the low 8 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sxtb))] +pub fn svextb_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t { + svextb_s16_m(op, pg, op) +} +#[doc = "Sign-extend the low 8 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sxtb))] +pub fn svextb_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t { + svextb_s16_m(svdup_n_s16(0), pg, op) +} +#[doc = "Sign-extend the low 8 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sxtb))] +pub fn svextb_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sxtb.nxv4i32")] + fn _svextb_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svextb_s32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Sign-extend the low 8 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sxtb))] +pub fn svextb_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { + svextb_s32_m(op, pg, op) +} +#[doc = "Sign-extend the low 8 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sxtb))] +pub fn svextb_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { + svextb_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Sign-extend the low 16 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sxth))] +pub fn svexth_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sxth.nxv4i32")] + fn _svexth_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svexth_s32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Sign-extend the low 16 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sxth))] +pub fn svexth_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { + svexth_s32_m(op, pg, op) +} +#[doc = "Sign-extend the low 16 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sxth))] +pub fn svexth_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { + svexth_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Sign-extend the low 8 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sxtb))] +pub fn svextb_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sxtb.nxv2i64")] + fn _svextb_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svextb_s64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Sign-extend the low 8 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sxtb))] +pub fn svextb_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svextb_s64_m(op, pg, op) +} +#[doc = "Sign-extend the low 8 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sxtb))] +pub fn svextb_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svextb_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Sign-extend the low 16 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sxth))] +pub fn svexth_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sxth.nxv2i64")] + fn _svexth_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svexth_s64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Sign-extend the low 16 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sxth))] +pub fn svexth_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svexth_s64_m(op, pg, op) +} +#[doc = "Sign-extend the low 16 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sxth))] +pub fn svexth_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svexth_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Sign-extend the low 32 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextw[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sxtw))] +pub fn svextw_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sxtw.nxv2i64")] + fn _svextw_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svextw_s64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Sign-extend the low 32 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextw[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sxtw))] +pub fn svextw_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svextw_s64_m(op, pg, op) +} +#[doc = "Sign-extend the low 32 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextw[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sxtw))] +pub fn svextw_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svextw_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Zero-extend the low 8 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uxtb))] +pub fn svextb_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uxtb.nxv8i16")] + fn _svextb_u16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svextb_u16_m(inactive.as_signed(), pg.sve_into(), op.as_signed()).as_unsigned() } +} +#[doc = "Zero-extend the low 8 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uxtb))] +pub fn svextb_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svextb_u16_m(op, pg, op) +} +#[doc = "Zero-extend the low 8 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uxtb))] +pub fn svextb_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svextb_u16_m(svdup_n_u16(0), pg, op) +} +#[doc = "Zero-extend the low 8 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uxtb))] +pub fn svextb_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uxtb.nxv4i32")] + fn _svextb_u32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svextb_u32_m(inactive.as_signed(), pg.sve_into(), op.as_signed()).as_unsigned() } +} +#[doc = "Zero-extend the low 8 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uxtb))] +pub fn svextb_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svextb_u32_m(op, pg, op) +} +#[doc = "Zero-extend the low 8 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uxtb))] +pub fn svextb_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svextb_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Zero-extend the low 16 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uxth))] +pub fn svexth_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uxth.nxv4i32")] + fn _svexth_u32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svexth_u32_m(inactive.as_signed(), pg.sve_into(), op.as_signed()).as_unsigned() } +} +#[doc = "Zero-extend the low 16 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uxth))] +pub fn svexth_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svexth_u32_m(op, pg, op) +} +#[doc = "Zero-extend the low 16 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uxth))] +pub fn svexth_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svexth_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Zero-extend the low 8 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uxtb))] +pub fn svextb_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uxtb.nxv2i64")] + fn _svextb_u64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svextb_u64_m(inactive.as_signed(), pg.sve_into(), op.as_signed()).as_unsigned() } +} +#[doc = "Zero-extend the low 8 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uxtb))] +pub fn svextb_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svextb_u64_m(op, pg, op) +} +#[doc = "Zero-extend the low 8 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uxtb))] +pub fn svextb_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svextb_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Zero-extend the low 16 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uxth))] +pub fn svexth_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uxth.nxv2i64")] + fn _svexth_u64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svexth_u64_m(inactive.as_signed(), pg.sve_into(), op.as_signed()).as_unsigned() } +} +#[doc = "Zero-extend the low 16 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uxth))] +pub fn svexth_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svexth_u64_m(op, pg, op) +} +#[doc = "Zero-extend the low 16 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uxth))] +pub fn svexth_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svexth_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Zero-extend the low 32 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextw[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uxtw))] +pub fn svextw_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uxtw.nxv2i64")] + fn _svextw_u64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svextw_u64_m(inactive.as_signed(), pg.sve_into(), op.as_signed()).as_unsigned() } +} +#[doc = "Zero-extend the low 32 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextw[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uxtw))] +pub fn svextw_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svextw_u64_m(op, pg, op) +} +#[doc = "Zero-extend the low 32 bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextw[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uxtw))] +pub fn svextw_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svextw_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Extract one vector from a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget2_f32(tuple: svfloat32x2_t) -> svfloat32_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget2_f64(tuple: svfloat64x2_t) -> svfloat64_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget2_s8(tuple: svint8x2_t) -> svint8_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget2_s16(tuple: svint16x2_t) -> svint16_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget2_s32(tuple: svint32x2_t) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget2_s64(tuple: svint64x2_t) -> svint64_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget2_u8(tuple: svuint8x2_t) -> svuint8_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget2_u16(tuple: svuint16x2_t) -> svuint16_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget2_u32(tuple: svuint32x2_t) -> svuint32_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget2_u64(tuple: svuint64x2_t) -> svuint64_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget3_f32(tuple: svfloat32x3_t) -> svfloat32_t { + static_assert_range!(IMM_INDEX, 0..=2); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget3_f64(tuple: svfloat64x3_t) -> svfloat64_t { + static_assert_range!(IMM_INDEX, 0..=2); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget3_s8(tuple: svint8x3_t) -> svint8_t { + static_assert_range!(IMM_INDEX, 0..=2); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget3_s16(tuple: svint16x3_t) -> svint16_t { + static_assert_range!(IMM_INDEX, 0..=2); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget3_s32(tuple: svint32x3_t) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=2); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget3_s64(tuple: svint64x3_t) -> svint64_t { + static_assert_range!(IMM_INDEX, 0..=2); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget3_u8(tuple: svuint8x3_t) -> svuint8_t { + static_assert_range!(IMM_INDEX, 0..=2); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget3_u16(tuple: svuint16x3_t) -> svuint16_t { + static_assert_range!(IMM_INDEX, 0..=2); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget3_u32(tuple: svuint32x3_t) -> svuint32_t { + static_assert_range!(IMM_INDEX, 0..=2); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget3_u64(tuple: svuint64x3_t) -> svuint64_t { + static_assert_range!(IMM_INDEX, 0..=2); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget4_f32(tuple: svfloat32x4_t) -> svfloat32_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget4_f64(tuple: svfloat64x4_t) -> svfloat64_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget4_s8(tuple: svint8x4_t) -> svint8_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget4_s16(tuple: svint16x4_t) -> svint16_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget4_s32(tuple: svint32x4_t) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget4_s64(tuple: svint64x4_t) -> svint64_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget4_u8(tuple: svuint8x4_t) -> svuint8_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget4_u16(tuple: svuint16x4_t) -> svuint16_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget4_u32(tuple: svuint32x4_t) -> svuint32_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Extract one vector from a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svget4_u64(tuple: svuint64x4_t) -> svuint64_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_get::<_, _, { IMM_INDEX }>(tuple) } +} +#[doc = "Create linear series"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_s8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(index))] +pub fn svindex_s8(base: i8, step: i8) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.index.nxv16i8")] + fn _svindex_s8(base: i8, step: i8) -> svint8_t; + } + unsafe { _svindex_s8(base, step) } +} +#[doc = "Create linear series"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_s16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(index))] +pub fn svindex_s16(base: i16, step: i16) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.index.nxv8i16")] + fn _svindex_s16(base: i16, step: i16) -> svint16_t; + } + unsafe { _svindex_s16(base, step) } +} +#[doc = "Create linear series"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_s32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(index))] +pub fn svindex_s32(base: i32, step: i32) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.index.nxv4i32")] + fn _svindex_s32(base: i32, step: i32) -> svint32_t; + } + unsafe { _svindex_s32(base, step) } +} +#[doc = "Create linear series"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_s64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(index))] +pub fn svindex_s64(base: i64, step: i64) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.index.nxv2i64")] + fn _svindex_s64(base: i64, step: i64) -> svint64_t; + } + unsafe { _svindex_s64(base, step) } +} +#[doc = "Create linear series"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_u8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(index))] +pub fn svindex_u8(base: u8, step: u8) -> svuint8_t { + unsafe { svindex_s8(base.as_signed(), step.as_signed()).as_unsigned() } +} +#[doc = "Create linear series"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_u16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(index))] +pub fn svindex_u16(base: u16, step: u16) -> svuint16_t { + unsafe { svindex_s16(base.as_signed(), step.as_signed()).as_unsigned() } +} +#[doc = "Create linear series"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_u32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(index))] +pub fn svindex_u32(base: u32, step: u32) -> svuint32_t { + unsafe { svindex_s32(base.as_signed(), step.as_signed()).as_unsigned() } +} +#[doc = "Create linear series"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_u64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(index))] +pub fn svindex_u64(base: u64, step: u64) -> svuint64_t { + unsafe { svindex_s64(base.as_signed(), step.as_signed()).as_unsigned() } +} +#[doc = "Insert scalar in shifted vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(insr))] +pub fn svinsr_n_f32(op1: svfloat32_t, op2: f32) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.insr.nxv4f32")] + fn _svinsr_n_f32(op1: svfloat32_t, op2: f32) -> svfloat32_t; + } + unsafe { _svinsr_n_f32(op1, op2) } +} +#[doc = "Insert scalar in shifted vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(insr))] +pub fn svinsr_n_f64(op1: svfloat64_t, op2: f64) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.insr.nxv2f64")] + fn _svinsr_n_f64(op1: svfloat64_t, op2: f64) -> svfloat64_t; + } + unsafe { _svinsr_n_f64(op1, op2) } +} +#[doc = "Insert scalar in shifted vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(insr))] +pub fn svinsr_n_s8(op1: svint8_t, op2: i8) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.insr.nxv16i8")] + fn _svinsr_n_s8(op1: svint8_t, op2: i8) -> svint8_t; + } + unsafe { _svinsr_n_s8(op1, op2) } +} +#[doc = "Insert scalar in shifted vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(insr))] +pub fn svinsr_n_s16(op1: svint16_t, op2: i16) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.insr.nxv8i16")] + fn _svinsr_n_s16(op1: svint16_t, op2: i16) -> svint16_t; + } + unsafe { _svinsr_n_s16(op1, op2) } +} +#[doc = "Insert scalar in shifted vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(insr))] +pub fn svinsr_n_s32(op1: svint32_t, op2: i32) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.insr.nxv4i32")] + fn _svinsr_n_s32(op1: svint32_t, op2: i32) -> svint32_t; + } + unsafe { _svinsr_n_s32(op1, op2) } +} +#[doc = "Insert scalar in shifted vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(insr))] +pub fn svinsr_n_s64(op1: svint64_t, op2: i64) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.insr.nxv2i64")] + fn _svinsr_n_s64(op1: svint64_t, op2: i64) -> svint64_t; + } + unsafe { _svinsr_n_s64(op1, op2) } +} +#[doc = "Insert scalar in shifted vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(insr))] +pub fn svinsr_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t { + unsafe { svinsr_n_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Insert scalar in shifted vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(insr))] +pub fn svinsr_n_u16(op1: svuint16_t, op2: u16) -> svuint16_t { + unsafe { svinsr_n_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Insert scalar in shifted vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(insr))] +pub fn svinsr_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t { + unsafe { svinsr_n_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Insert scalar in shifted vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(insr))] +pub fn svinsr_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t { + unsafe { svinsr_n_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_f32(pg: svbool_t, op: svfloat32_t) -> f32 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lasta.nxv4f32")] + fn _svlasta_f32(pg: svbool4_t, op: svfloat32_t) -> f32; + } + unsafe { _svlasta_f32(pg.sve_into(), op) } +} +#[doc = "Extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_f64(pg: svbool_t, op: svfloat64_t) -> f64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lasta.nxv2f64")] + fn _svlasta_f64(pg: svbool2_t, op: svfloat64_t) -> f64; + } + unsafe { _svlasta_f64(pg.sve_into(), op) } +} +#[doc = "Extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_s8(pg: svbool_t, op: svint8_t) -> i8 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lasta.nxv16i8")] + fn _svlasta_s8(pg: svbool_t, op: svint8_t) -> i8; + } + unsafe { _svlasta_s8(pg, op) } +} +#[doc = "Extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_s16(pg: svbool_t, op: svint16_t) -> i16 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lasta.nxv8i16")] + fn _svlasta_s16(pg: svbool8_t, op: svint16_t) -> i16; + } + unsafe { _svlasta_s16(pg.sve_into(), op) } +} +#[doc = "Extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_s32(pg: svbool_t, op: svint32_t) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lasta.nxv4i32")] + fn _svlasta_s32(pg: svbool4_t, op: svint32_t) -> i32; + } + unsafe { _svlasta_s32(pg.sve_into(), op) } +} +#[doc = "Extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_s64(pg: svbool_t, op: svint64_t) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lasta.nxv2i64")] + fn _svlasta_s64(pg: svbool2_t, op: svint64_t) -> i64; + } + unsafe { _svlasta_s64(pg.sve_into(), op) } +} +#[doc = "Extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_u8(pg: svbool_t, op: svuint8_t) -> u8 { + unsafe { svlasta_s8(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_u16(pg: svbool_t, op: svuint16_t) -> u16 { + unsafe { svlasta_s16(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_u32(pg: svbool_t, op: svuint32_t) -> u32 { + unsafe { svlasta_s32(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Extract element after last"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lasta))] +pub fn svlasta_u64(pg: svbool_t, op: svuint64_t) -> u64 { + unsafe { svlasta_s64(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Extract last element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_f32(pg: svbool_t, op: svfloat32_t) -> f32 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lastb.nxv4f32")] + fn _svlastb_f32(pg: svbool4_t, op: svfloat32_t) -> f32; + } + unsafe { _svlastb_f32(pg.sve_into(), op) } +} +#[doc = "Extract last element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_f64(pg: svbool_t, op: svfloat64_t) -> f64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lastb.nxv2f64")] + fn _svlastb_f64(pg: svbool2_t, op: svfloat64_t) -> f64; + } + unsafe { _svlastb_f64(pg.sve_into(), op) } +} +#[doc = "Extract last element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_s8(pg: svbool_t, op: svint8_t) -> i8 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lastb.nxv16i8")] + fn _svlastb_s8(pg: svbool_t, op: svint8_t) -> i8; + } + unsafe { _svlastb_s8(pg, op) } +} +#[doc = "Extract last element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_s16(pg: svbool_t, op: svint16_t) -> i16 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lastb.nxv8i16")] + fn _svlastb_s16(pg: svbool8_t, op: svint16_t) -> i16; + } + unsafe { _svlastb_s16(pg.sve_into(), op) } +} +#[doc = "Extract last element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_s32(pg: svbool_t, op: svint32_t) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lastb.nxv4i32")] + fn _svlastb_s32(pg: svbool4_t, op: svint32_t) -> i32; + } + unsafe { _svlastb_s32(pg.sve_into(), op) } +} +#[doc = "Extract last element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_s64(pg: svbool_t, op: svint64_t) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lastb.nxv2i64")] + fn _svlastb_s64(pg: svbool2_t, op: svint64_t) -> i64; + } + unsafe { _svlastb_s64(pg.sve_into(), op) } +} +#[doc = "Extract last element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_u8(pg: svbool_t, op: svuint8_t) -> u8 { + unsafe { svlastb_s8(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Extract last element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_u16(pg: svbool_t, op: svuint16_t) -> u16 { + unsafe { svlastb_s16(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Extract last element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_u32(pg: svbool_t, op: svuint32_t) -> u32 { + unsafe { svlastb_s32(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Extract last element"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lastb))] +pub fn svlastb_u64(pg: svbool_t, op: svuint64_t) -> u64 { + unsafe { svlastb_s64(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_f32(pg: svbool_t, base: *const f32) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4f32")] + fn _svld1_f32(pg: svbool4_t, base: *const f32) -> svfloat32_t; + } + _svld1_f32(pg.sve_into(), base) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_f64(pg: svbool_t, base: *const f64) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2f64")] + fn _svld1_f64(pg: svbool2_t, base: *const f64) -> svfloat64_t; + } + _svld1_f64(pg.sve_into(), base) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1_s8(pg: svbool_t, base: *const i8) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv16i8")] + fn _svld1_s8(pg: svbool_t, base: *const i8) -> svint8_t; + } + _svld1_s8(pg, base) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1_s16(pg: svbool_t, base: *const i16) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv8i16")] + fn _svld1_s16(pg: svbool8_t, base: *const i16) -> svint16_t; + } + _svld1_s16(pg.sve_into(), base) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_s32(pg: svbool_t, base: *const i32) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4i32")] + fn _svld1_s32(pg: svbool4_t, base: *const i32) -> svint32_t; + } + _svld1_s32(pg.sve_into(), base) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_s64(pg: svbool_t, base: *const i64) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i64")] + fn _svld1_s64(pg: svbool2_t, base: *const i64) -> svint64_t; + } + _svld1_s64(pg.sve_into(), base) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1_u8(pg: svbool_t, base: *const u8) -> svuint8_t { + svld1_s8(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1_u16(pg: svbool_t, base: *const u16) -> svuint16_t { + svld1_s16(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_u32(pg: svbool_t, base: *const u32) -> svuint32_t { + svld1_s32(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_u64(pg: svbool_t, base: *const u64) -> svuint64_t { + svld1_s64(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s32]index[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_s32index_f32( + pg: svbool_t, + base: *const f32, + indices: svint32_t, +) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4f32" + )] + fn _svld1_gather_s32index_f32( + pg: svbool4_t, + base: *const f32, + indices: svint32_t, + ) -> svfloat32_t; + } + _svld1_gather_s32index_f32(pg.sve_into(), base, indices) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s32]index[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_s32index_s32( + pg: svbool_t, + base: *const i32, + indices: svint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4i32" + )] + fn _svld1_gather_s32index_s32( + pg: svbool4_t, + base: *const i32, + indices: svint32_t, + ) -> svint32_t; + } + _svld1_gather_s32index_s32(pg.sve_into(), base, indices) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s32]index[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_s32index_u32( + pg: svbool_t, + base: *const u32, + indices: svint32_t, +) -> svuint32_t { + svld1_gather_s32index_s32(pg, base.as_signed(), indices).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s64]index[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_s64index_f64( + pg: svbool_t, + base: *const f64, + indices: svint64_t, +) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.index.nxv2f64" + )] + fn _svld1_gather_s64index_f64( + pg: svbool2_t, + base: *const f64, + indices: svint64_t, + ) -> svfloat64_t; + } + _svld1_gather_s64index_f64(pg.sve_into(), base, indices) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s64]index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_s64index_s64( + pg: svbool_t, + base: *const i64, + indices: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.index.nxv2i64" + )] + fn _svld1_gather_s64index_s64( + pg: svbool2_t, + base: *const i64, + indices: svint64_t, + ) -> svint64_t; + } + _svld1_gather_s64index_s64(pg.sve_into(), base, indices) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s64]index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_s64index_u64( + pg: svbool_t, + base: *const u64, + indices: svint64_t, +) -> svuint64_t { + svld1_gather_s64index_s64(pg, base.as_signed(), indices).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u32]index[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32index_f32( + pg: svbool_t, + base: *const f32, + indices: svuint32_t, +) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4f32" + )] + fn _svld1_gather_u32index_f32( + pg: svbool4_t, + base: *const f32, + indices: svint32_t, + ) -> svfloat32_t; + } + _svld1_gather_u32index_f32(pg.sve_into(), base, indices.as_signed()) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u32]index[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32index_s32( + pg: svbool_t, + base: *const i32, + indices: svuint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4i32" + )] + fn _svld1_gather_u32index_s32( + pg: svbool4_t, + base: *const i32, + indices: svint32_t, + ) -> svint32_t; + } + _svld1_gather_u32index_s32(pg.sve_into(), base, indices.as_signed()) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u32]index[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32index_u32( + pg: svbool_t, + base: *const u32, + indices: svuint32_t, +) -> svuint32_t { + svld1_gather_u32index_s32(pg, base.as_signed(), indices).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u64]index[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64index_f64( + pg: svbool_t, + base: *const f64, + indices: svuint64_t, +) -> svfloat64_t { + svld1_gather_s64index_f64(pg, base, indices.as_signed()) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u64]index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64index_s64( + pg: svbool_t, + base: *const i64, + indices: svuint64_t, +) -> svint64_t { + svld1_gather_s64index_s64(pg, base, indices.as_signed()) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u64]index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64index_u64( + pg: svbool_t, + base: *const u64, + indices: svuint64_t, +) -> svuint64_t { + svld1_gather_s64index_s64(pg, base.as_signed(), indices.as_signed()).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s32]offset[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_s32offset_f32( + pg: svbool_t, + base: *const f32, + offsets: svint32_t, +) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.sxtw.nxv4f32" + )] + fn _svld1_gather_s32offset_f32( + pg: svbool4_t, + base: *const f32, + offsets: svint32_t, + ) -> svfloat32_t; + } + _svld1_gather_s32offset_f32(pg.sve_into(), base, offsets) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s32]offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_s32offset_s32( + pg: svbool_t, + base: *const i32, + offsets: svint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.sxtw.nxv4i32" + )] + fn _svld1_gather_s32offset_s32( + pg: svbool4_t, + base: *const i32, + offsets: svint32_t, + ) -> svint32_t; + } + _svld1_gather_s32offset_s32(pg.sve_into(), base, offsets) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s32]offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_s32offset_u32( + pg: svbool_t, + base: *const u32, + offsets: svint32_t, +) -> svuint32_t { + svld1_gather_s32offset_s32(pg, base.as_signed(), offsets).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s64]offset[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_s64offset_f64( + pg: svbool_t, + base: *const f64, + offsets: svint64_t, +) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.nxv2f64" + )] + fn _svld1_gather_s64offset_f64( + pg: svbool2_t, + base: *const f64, + offsets: svint64_t, + ) -> svfloat64_t; + } + _svld1_gather_s64offset_f64(pg.sve_into(), base, offsets) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_s64offset_s64( + pg: svbool_t, + base: *const i64, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.nxv2i64" + )] + fn _svld1_gather_s64offset_s64( + pg: svbool2_t, + base: *const i64, + offsets: svint64_t, + ) -> svint64_t; + } + _svld1_gather_s64offset_s64(pg.sve_into(), base, offsets) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_s64offset_u64( + pg: svbool_t, + base: *const u64, + offsets: svint64_t, +) -> svuint64_t { + svld1_gather_s64offset_s64(pg, base.as_signed(), offsets).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u32]offset[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32offset_f32( + pg: svbool_t, + base: *const f32, + offsets: svuint32_t, +) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.uxtw.nxv4f32" + )] + fn _svld1_gather_u32offset_f32( + pg: svbool4_t, + base: *const f32, + offsets: svint32_t, + ) -> svfloat32_t; + } + _svld1_gather_u32offset_f32(pg.sve_into(), base, offsets.as_signed()) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u32]offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32offset_s32( + pg: svbool_t, + base: *const i32, + offsets: svuint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.uxtw.nxv4i32" + )] + fn _svld1_gather_u32offset_s32( + pg: svbool4_t, + base: *const i32, + offsets: svint32_t, + ) -> svint32_t; + } + _svld1_gather_u32offset_s32(pg.sve_into(), base, offsets.as_signed()) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u32]offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32offset_u32( + pg: svbool_t, + base: *const u32, + offsets: svuint32_t, +) -> svuint32_t { + svld1_gather_u32offset_s32(pg, base.as_signed(), offsets).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u64]offset[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64offset_f64( + pg: svbool_t, + base: *const f64, + offsets: svuint64_t, +) -> svfloat64_t { + svld1_gather_s64offset_f64(pg, base, offsets.as_signed()) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64offset_s64( + pg: svbool_t, + base: *const i64, + offsets: svuint64_t, +) -> svint64_t { + svld1_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64offset_u64( + pg: svbool_t, + base: *const u64, + offsets: svuint64_t, +) -> svuint64_t { + svld1_gather_s64offset_s64(pg, base.as_signed(), offsets.as_signed()).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_f32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32base_f32(pg: svbool_t, bases: svuint32_t) -> svfloat32_t { + svld1_gather_u32base_offset_f32(pg, bases, 0) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svld1_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svld1_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_f64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64base_f64(pg: svbool_t, bases: svuint64_t) -> svfloat64_t { + svld1_gather_u64base_offset_f64(pg, bases, 0) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svld1_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svld1_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_index_f32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32base_index_f32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svfloat32_t { + svld1_gather_u32base_offset_f32(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_index_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svint32_t { + svld1_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_index_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svuint32_t { + svld1_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_index_f64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64base_index_f64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svfloat64_t { + svld1_gather_u64base_offset_f64(pg, bases, index.unchecked_shl(3)) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svld1_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(3)) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svld1_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(3)) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_offset_f32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32base_offset_f32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4f32.nxv4i32" + )] + fn _svld1_gather_u32base_offset_f32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> svfloat32_t; + } + _svld1_gather_u32base_offset_f32(pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i32.nxv4i32" + )] + fn _svld1_gather_u32base_offset_s32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> svint32_t; + } + _svld1_gather_u32base_offset_s32(pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + svld1_gather_u32base_offset_s32(pg, bases, offset).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_offset_f64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64base_offset_f64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2f64.nxv2i64" + )] + fn _svld1_gather_u64base_offset_f64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> svfloat64_t; + } + _svld1_gather_u64base_offset_f64(pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i64.nxv2i64" + )] + fn _svld1_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> svint64_t; + } + _svld1_gather_u64base_offset_s64(pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svld1_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svfloat32_t { + svld1_f32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svfloat64_t { + svld1_f64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint8_t { + svld1_s8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svint16_t { + svld1_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svint32_t { + svld1_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svint64_t { + svld1_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuint8_t { + svld1_u8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svuint16_t { + svld1_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svuint32_t { + svld1_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1d))] +pub unsafe fn svld1_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> svuint64_t { + svld1_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load and replicate 256 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1row))] +pub unsafe fn svld1ro_f32(pg: svbool_t, base: *const f32) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1ro.nxv4f32")] + fn _svld1ro_f32(pg: svbool4_t, base: *const f32) -> svfloat32_t; + } + _svld1ro_f32(pg.sve_into(), base) +} +#[doc = "Load and replicate 256 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1rod))] +pub unsafe fn svld1ro_f64(pg: svbool_t, base: *const f64) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1ro.nxv2f64")] + fn _svld1ro_f64(pg: svbool2_t, base: *const f64) -> svfloat64_t; + } + _svld1ro_f64(pg.sve_into(), base) +} +#[doc = "Load and replicate 256 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1rob))] +pub unsafe fn svld1ro_s8(pg: svbool_t, base: *const i8) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1ro.nxv16i8")] + fn _svld1ro_s8(pg: svbool_t, base: *const i8) -> svint8_t; + } + _svld1ro_s8(pg, base) +} +#[doc = "Load and replicate 256 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1roh))] +pub unsafe fn svld1ro_s16(pg: svbool_t, base: *const i16) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1ro.nxv8i16")] + fn _svld1ro_s16(pg: svbool8_t, base: *const i16) -> svint16_t; + } + _svld1ro_s16(pg.sve_into(), base) +} +#[doc = "Load and replicate 256 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1row))] +pub unsafe fn svld1ro_s32(pg: svbool_t, base: *const i32) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1ro.nxv4i32")] + fn _svld1ro_s32(pg: svbool4_t, base: *const i32) -> svint32_t; + } + _svld1ro_s32(pg.sve_into(), base) +} +#[doc = "Load and replicate 256 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1rod))] +pub unsafe fn svld1ro_s64(pg: svbool_t, base: *const i64) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1ro.nxv2i64")] + fn _svld1ro_s64(pg: svbool2_t, base: *const i64) -> svint64_t; + } + _svld1ro_s64(pg.sve_into(), base) +} +#[doc = "Load and replicate 256 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1rob))] +pub unsafe fn svld1ro_u8(pg: svbool_t, base: *const u8) -> svuint8_t { + svld1ro_s8(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load and replicate 256 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1roh))] +pub unsafe fn svld1ro_u16(pg: svbool_t, base: *const u16) -> svuint16_t { + svld1ro_s16(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load and replicate 256 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1row))] +pub unsafe fn svld1ro_u32(pg: svbool_t, base: *const u32) -> svuint32_t { + svld1ro_s32(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load and replicate 256 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1rod))] +pub unsafe fn svld1ro_u64(pg: svbool_t, base: *const u64) -> svuint64_t { + svld1ro_s64(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load and replicate 128 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1rqw))] +pub unsafe fn svld1rq_f32(pg: svbool_t, base: *const f32) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1rq.nxv4f32")] + fn _svld1rq_f32(pg: svbool4_t, base: *const f32) -> svfloat32_t; + } + _svld1rq_f32(pg.sve_into(), base) +} +#[doc = "Load and replicate 128 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1rqd))] +pub unsafe fn svld1rq_f64(pg: svbool_t, base: *const f64) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1rq.nxv2f64")] + fn _svld1rq_f64(pg: svbool2_t, base: *const f64) -> svfloat64_t; + } + _svld1rq_f64(pg.sve_into(), base) +} +#[doc = "Load and replicate 128 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1rqb))] +pub unsafe fn svld1rq_s8(pg: svbool_t, base: *const i8) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1rq.nxv16i8")] + fn _svld1rq_s8(pg: svbool_t, base: *const i8) -> svint8_t; + } + _svld1rq_s8(pg, base) +} +#[doc = "Load and replicate 128 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1rqh))] +pub unsafe fn svld1rq_s16(pg: svbool_t, base: *const i16) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1rq.nxv8i16")] + fn _svld1rq_s16(pg: svbool8_t, base: *const i16) -> svint16_t; + } + _svld1rq_s16(pg.sve_into(), base) +} +#[doc = "Load and replicate 128 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1rqw))] +pub unsafe fn svld1rq_s32(pg: svbool_t, base: *const i32) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1rq.nxv4i32")] + fn _svld1rq_s32(pg: svbool4_t, base: *const i32) -> svint32_t; + } + _svld1rq_s32(pg.sve_into(), base) +} +#[doc = "Load and replicate 128 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1rqd))] +pub unsafe fn svld1rq_s64(pg: svbool_t, base: *const i64) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1rq.nxv2i64")] + fn _svld1rq_s64(pg: svbool2_t, base: *const i64) -> svint64_t; + } + _svld1rq_s64(pg.sve_into(), base) +} +#[doc = "Load and replicate 128 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1rqb))] +pub unsafe fn svld1rq_u8(pg: svbool_t, base: *const u8) -> svuint8_t { + svld1rq_s8(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load and replicate 128 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1rqh))] +pub unsafe fn svld1rq_u16(pg: svbool_t, base: *const u16) -> svuint16_t { + svld1rq_s16(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load and replicate 128 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1rqw))] +pub unsafe fn svld1rq_u32(pg: svbool_t, base: *const u32) -> svuint32_t { + svld1rq_s32(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load and replicate 128 bits of data"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1rqd))] +pub unsafe fn svld1rq_u64(pg: svbool_t, base: *const u64) -> svuint64_t { + svld1rq_s64(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[s32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_s32offset_s32( + pg: svbool_t, + base: *const i8, + offsets: svint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.sxtw.nxv4i8" + )] + fn _svld1sb_gather_s32offset_s32( + pg: svbool4_t, + base: *const i8, + offsets: svint32_t, + ) -> nxv4i8; + } + crate::intrinsics::simd::simd_cast(_svld1sb_gather_s32offset_s32(pg.sve_into(), base, offsets)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_s32offset_s32( + pg: svbool_t, + base: *const i16, + offsets: svint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.sxtw.nxv4i16" + )] + fn _svld1sh_gather_s32offset_s32( + pg: svbool4_t, + base: *const i16, + offsets: svint32_t, + ) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast(_svld1sh_gather_s32offset_s32(pg.sve_into(), base, offsets)) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[s32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_s32offset_u32( + pg: svbool_t, + base: *const i8, + offsets: svint32_t, +) -> svuint32_t { + svld1sb_gather_s32offset_s32(pg, base, offsets).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_s32offset_u32( + pg: svbool_t, + base: *const i16, + offsets: svint32_t, +) -> svuint32_t { + svld1sh_gather_s32offset_s32(pg, base, offsets).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[s64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_s64offset_s64( + pg: svbool_t, + base: *const i8, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.nxv2i8" + )] + fn _svld1sb_gather_s64offset_s64( + pg: svbool2_t, + base: *const i8, + offsets: svint64_t, + ) -> nxv2i8; + } + crate::intrinsics::simd::simd_cast(_svld1sb_gather_s64offset_s64(pg.sve_into(), base, offsets)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_s64offset_s64( + pg: svbool_t, + base: *const i16, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.nxv2i16" + )] + fn _svld1sh_gather_s64offset_s64( + pg: svbool2_t, + base: *const i16, + offsets: svint64_t, + ) -> nxv2i16; + } + crate::intrinsics::simd::simd_cast(_svld1sh_gather_s64offset_s64(pg.sve_into(), base, offsets)) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[s64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_s64offset_s64( + pg: svbool_t, + base: *const i32, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.nxv2i32" + )] + fn _svld1sw_gather_s64offset_s64( + pg: svbool2_t, + base: *const i32, + offsets: svint64_t, + ) -> nxv2i32; + } + crate::intrinsics::simd::simd_cast(_svld1sw_gather_s64offset_s64(pg.sve_into(), base, offsets)) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[s64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_s64offset_u64( + pg: svbool_t, + base: *const i8, + offsets: svint64_t, +) -> svuint64_t { + svld1sb_gather_s64offset_s64(pg, base, offsets).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_s64offset_u64( + pg: svbool_t, + base: *const i16, + offsets: svint64_t, +) -> svuint64_t { + svld1sh_gather_s64offset_s64(pg, base, offsets).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[s64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_s64offset_u64( + pg: svbool_t, + base: *const i32, + offsets: svint64_t, +) -> svuint64_t { + svld1sw_gather_s64offset_s64(pg, base, offsets).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[u32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u32offset_s32( + pg: svbool_t, + base: *const i8, + offsets: svuint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.uxtw.nxv4i8" + )] + fn _svld1sb_gather_u32offset_s32( + pg: svbool4_t, + base: *const i8, + offsets: svint32_t, + ) -> nxv4i8; + } + crate::intrinsics::simd::simd_cast(_svld1sb_gather_u32offset_s32( + pg.sve_into(), + base, + offsets.as_signed(), + )) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u32offset_s32( + pg: svbool_t, + base: *const i16, + offsets: svuint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.uxtw.nxv4i16" + )] + fn _svld1sh_gather_u32offset_s32( + pg: svbool4_t, + base: *const i16, + offsets: svint32_t, + ) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast(_svld1sh_gather_u32offset_s32( + pg.sve_into(), + base, + offsets.as_signed(), + )) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[u32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u32offset_u32( + pg: svbool_t, + base: *const i8, + offsets: svuint32_t, +) -> svuint32_t { + svld1sb_gather_u32offset_s32(pg, base, offsets).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u32offset_u32( + pg: svbool_t, + base: *const i16, + offsets: svuint32_t, +) -> svuint32_t { + svld1sh_gather_u32offset_s32(pg, base, offsets).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[u64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u64offset_s64( + pg: svbool_t, + base: *const i8, + offsets: svuint64_t, +) -> svint64_t { + svld1sb_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u64offset_s64( + pg: svbool_t, + base: *const i16, + offsets: svuint64_t, +) -> svint64_t { + svld1sh_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[u64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_u64offset_s64( + pg: svbool_t, + base: *const i32, + offsets: svuint64_t, +) -> svint64_t { + svld1sw_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[u64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u64offset_u64( + pg: svbool_t, + base: *const i8, + offsets: svuint64_t, +) -> svuint64_t { + svld1sb_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u64offset_u64( + pg: svbool_t, + base: *const i16, + offsets: svuint64_t, +) -> svuint64_t { + svld1sh_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[u64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_u64offset_u64( + pg: svbool_t, + base: *const i32, + offsets: svuint64_t, +) -> svuint64_t { + svld1sw_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u32base]_offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i8.nxv4i32" + )] + fn _svld1sb_gather_u32base_offset_s32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i8; + } + crate::intrinsics::simd::simd_cast(_svld1sb_gather_u32base_offset_s32( + pg.sve_into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u32base]_offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i16.nxv4i32" + )] + fn _svld1sh_gather_u32base_offset_s32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast(_svld1sh_gather_u32base_offset_s32( + pg.sve_into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u32base]_offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + svld1sb_gather_u32base_offset_s32(pg, bases, offset).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u32base]_offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + svld1sh_gather_u32base_offset_s32(pg, bases, offset).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64" + )] + fn _svld1sb_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i8; + } + crate::intrinsics::simd::simd_cast(_svld1sb_gather_u64base_offset_s64( + pg.sve_into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i16.nxv2i64" + )] + fn _svld1sh_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i16; + } + crate::intrinsics::simd::simd_cast(_svld1sh_gather_u64base_offset_s64( + pg.sve_into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i32.nxv2i64" + )] + fn _svld1sw_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i32; + } + crate::intrinsics::simd::simd_cast(_svld1sw_gather_u64base_offset_s64( + pg.sve_into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svld1sb_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svld1sh_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svld1sw_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u32base]_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svld1sb_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u32base]_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svld1sh_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u32base]_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svld1sb_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u32base]_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svld1sh_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svld1sb_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svld1sh_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svld1sw_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svld1sb_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svld1sh_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svld1sw_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_s16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_s16(pg: svbool_t, base: *const i8) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv8i8")] + fn _svld1sb_s16(pg: svbool8_t, base: *const i8) -> nxv8i8; + } + crate::intrinsics::simd::simd_cast(_svld1sb_s16(pg.sve_into(), base)) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_s32(pg: svbool_t, base: *const i8) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4i8")] + fn _svld1sb_s32(pg: svbool4_t, base: *const i8) -> nxv4i8; + } + crate::intrinsics::simd::simd_cast(_svld1sb_s32(pg.sve_into(), base)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_s32(pg: svbool_t, base: *const i16) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4i16")] + fn _svld1sh_s32(pg: svbool4_t, base: *const i16) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast(_svld1sh_s32(pg.sve_into(), base)) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_s64(pg: svbool_t, base: *const i8) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i8")] + fn _svld1sb_s64(pg: svbool2_t, base: *const i8) -> nxv2i8; + } + crate::intrinsics::simd::simd_cast(_svld1sb_s64(pg.sve_into(), base)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_s64(pg: svbool_t, base: *const i16) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i16")] + fn _svld1sh_s64(pg: svbool2_t, base: *const i16) -> nxv2i16; + } + crate::intrinsics::simd::simd_cast(_svld1sh_s64(pg.sve_into(), base)) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_s64(pg: svbool_t, base: *const i32) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i32")] + fn _svld1sw_s64(pg: svbool2_t, base: *const i32) -> nxv2i32; + } + crate::intrinsics::simd::simd_cast(_svld1sw_s64(pg.sve_into(), base)) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_u16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_u16(pg: svbool_t, base: *const i8) -> svuint16_t { + svld1sb_s16(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_u32(pg: svbool_t, base: *const i8) -> svuint32_t { + svld1sb_s32(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_u32(pg: svbool_t, base: *const i16) -> svuint32_t { + svld1sh_s32(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_u64(pg: svbool_t, base: *const i8) -> svuint64_t { + svld1sb_s64(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_u64(pg: svbool_t, base: *const i16) -> svuint64_t { + svld1sh_s64(pg, base).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_u64(pg: svbool_t, base: *const i32) -> svuint64_t { + svld1sw_s64(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_vnum_s16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_vnum_s16(pg: svbool_t, base: *const i8, vnum: i64) -> svint16_t { + svld1sb_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_vnum_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_vnum_s32(pg: svbool_t, base: *const i8, vnum: i64) -> svint32_t { + svld1sb_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_vnum_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_vnum_s32(pg: svbool_t, base: *const i16, vnum: i64) -> svint32_t { + svld1sh_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_vnum_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_vnum_s64(pg: svbool_t, base: *const i8, vnum: i64) -> svint64_t { + svld1sb_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_vnum_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_vnum_s64(pg: svbool_t, base: *const i16, vnum: i64) -> svint64_t { + svld1sh_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_vnum_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_vnum_s64(pg: svbool_t, base: *const i32, vnum: i64) -> svint64_t { + svld1sw_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_vnum_u16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_vnum_u16(pg: svbool_t, base: *const i8, vnum: i64) -> svuint16_t { + svld1sb_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_vnum_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_vnum_u32(pg: svbool_t, base: *const i8, vnum: i64) -> svuint32_t { + svld1sb_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_vnum_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_vnum_u32(pg: svbool_t, base: *const i16, vnum: i64) -> svuint32_t { + svld1sh_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_vnum_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sb))] +pub unsafe fn svld1sb_vnum_u64(pg: svbool_t, base: *const i8, vnum: i64) -> svuint64_t { + svld1sb_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_vnum_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_vnum_u64(pg: svbool_t, base: *const i16, vnum: i64) -> svuint64_t { + svld1sh_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_vnum_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_vnum_u64(pg: svbool_t, base: *const i32, vnum: i64) -> svuint64_t { + svld1sw_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s32]index_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_s32index_s32( + pg: svbool_t, + base: *const i16, + indices: svint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4i16" + )] + fn _svld1sh_gather_s32index_s32( + pg: svbool4_t, + base: *const i16, + indices: svint32_t, + ) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast(_svld1sh_gather_s32index_s32(pg.sve_into(), base, indices)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s32]index_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_s32index_u32( + pg: svbool_t, + base: *const i16, + indices: svint32_t, +) -> svuint32_t { + svld1sh_gather_s32index_s32(pg, base, indices).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_s64index_s64( + pg: svbool_t, + base: *const i16, + indices: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.index.nxv2i16" + )] + fn _svld1sh_gather_s64index_s64( + pg: svbool2_t, + base: *const i16, + indices: svint64_t, + ) -> nxv2i16; + } + crate::intrinsics::simd::simd_cast(_svld1sh_gather_s64index_s64(pg.sve_into(), base, indices)) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[s64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_s64index_s64( + pg: svbool_t, + base: *const i32, + indices: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.index.nxv2i32" + )] + fn _svld1sw_gather_s64index_s64( + pg: svbool2_t, + base: *const i32, + indices: svint64_t, + ) -> nxv2i32; + } + crate::intrinsics::simd::simd_cast(_svld1sw_gather_s64index_s64(pg.sve_into(), base, indices)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_s64index_u64( + pg: svbool_t, + base: *const i16, + indices: svint64_t, +) -> svuint64_t { + svld1sh_gather_s64index_s64(pg, base, indices).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[s64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_s64index_u64( + pg: svbool_t, + base: *const i32, + indices: svint64_t, +) -> svuint64_t { + svld1sw_gather_s64index_s64(pg, base, indices).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u32]index_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u32index_s32( + pg: svbool_t, + base: *const i16, + indices: svuint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4i16" + )] + fn _svld1sh_gather_u32index_s32( + pg: svbool4_t, + base: *const i16, + indices: svint32_t, + ) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast(_svld1sh_gather_u32index_s32( + pg.sve_into(), + base, + indices.as_signed(), + )) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u32]index_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u32index_u32( + pg: svbool_t, + base: *const i16, + indices: svuint32_t, +) -> svuint32_t { + svld1sh_gather_u32index_s32(pg, base, indices).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u64index_s64( + pg: svbool_t, + base: *const i16, + indices: svuint64_t, +) -> svint64_t { + svld1sh_gather_s64index_s64(pg, base, indices.as_signed()) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[u64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_u64index_s64( + pg: svbool_t, + base: *const i32, + indices: svuint64_t, +) -> svint64_t { + svld1sw_gather_s64index_s64(pg, base, indices.as_signed()) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u64index_u64( + pg: svbool_t, + base: *const i16, + indices: svuint64_t, +) -> svuint64_t { + svld1sh_gather_s64index_s64(pg, base, indices.as_signed()).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[u64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_u64index_u64( + pg: svbool_t, + base: *const i32, + indices: svuint64_t, +) -> svuint64_t { + svld1sw_gather_s64index_s64(pg, base, indices.as_signed()).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u32base]_index_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svint32_t { + svld1sh_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u32base]_index_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svuint32_t { + svld1sh_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u64base]_index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svld1sh_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather[_u64base]_index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svld1sw_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Load 16-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u64base]_index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sh))] +pub unsafe fn svld1sh_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svld1sh_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and sign-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather[_u64base]_index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1sw))] +pub unsafe fn svld1sw_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svld1sw_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[s32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_s32offset_s32( + pg: svbool_t, + base: *const u8, + offsets: svint32_t, +) -> svint32_t { + svld1ub_gather_s32offset_u32(pg, base, offsets).as_signed() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_s32offset_s32( + pg: svbool_t, + base: *const u16, + offsets: svint32_t, +) -> svint32_t { + svld1uh_gather_s32offset_u32(pg, base, offsets).as_signed() +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[s32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_s32offset_u32( + pg: svbool_t, + base: *const u8, + offsets: svint32_t, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.sxtw.nxv4i8" + )] + fn _svld1ub_gather_s32offset_u32( + pg: svbool4_t, + base: *const i8, + offsets: svint32_t, + ) -> nxv4i8; + } + crate::intrinsics::simd::simd_cast::( + _svld1ub_gather_s32offset_u32(pg.sve_into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_s32offset_u32( + pg: svbool_t, + base: *const u16, + offsets: svint32_t, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.sxtw.nxv4i16" + )] + fn _svld1uh_gather_s32offset_u32( + pg: svbool4_t, + base: *const i16, + offsets: svint32_t, + ) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast::( + _svld1uh_gather_s32offset_u32(pg.sve_into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[s64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_s64offset_s64( + pg: svbool_t, + base: *const u8, + offsets: svint64_t, +) -> svint64_t { + svld1ub_gather_s64offset_u64(pg, base, offsets).as_signed() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_s64offset_s64( + pg: svbool_t, + base: *const u16, + offsets: svint64_t, +) -> svint64_t { + svld1uh_gather_s64offset_u64(pg, base, offsets).as_signed() +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[s64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_s64offset_s64( + pg: svbool_t, + base: *const u32, + offsets: svint64_t, +) -> svint64_t { + svld1uw_gather_s64offset_u64(pg, base, offsets).as_signed() +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[s64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_s64offset_u64( + pg: svbool_t, + base: *const u8, + offsets: svint64_t, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.nxv2i8" + )] + fn _svld1ub_gather_s64offset_u64( + pg: svbool2_t, + base: *const i8, + offsets: svint64_t, + ) -> nxv2i8; + } + crate::intrinsics::simd::simd_cast::( + _svld1ub_gather_s64offset_u64(pg.sve_into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_s64offset_u64( + pg: svbool_t, + base: *const u16, + offsets: svint64_t, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.nxv2i16" + )] + fn _svld1uh_gather_s64offset_u64( + pg: svbool2_t, + base: *const i16, + offsets: svint64_t, + ) -> nxv2i16; + } + crate::intrinsics::simd::simd_cast::( + _svld1uh_gather_s64offset_u64(pg.sve_into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[s64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_s64offset_u64( + pg: svbool_t, + base: *const u32, + offsets: svint64_t, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.nxv2i32" + )] + fn _svld1uw_gather_s64offset_u64( + pg: svbool2_t, + base: *const i32, + offsets: svint64_t, + ) -> nxv2i32; + } + crate::intrinsics::simd::simd_cast::( + _svld1uw_gather_s64offset_u64(pg.sve_into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[u32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u32offset_s32( + pg: svbool_t, + base: *const u8, + offsets: svuint32_t, +) -> svint32_t { + svld1ub_gather_u32offset_u32(pg, base, offsets).as_signed() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u32offset_s32( + pg: svbool_t, + base: *const u16, + offsets: svuint32_t, +) -> svint32_t { + svld1uh_gather_u32offset_u32(pg, base, offsets).as_signed() +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[u32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u32offset_u32( + pg: svbool_t, + base: *const u8, + offsets: svuint32_t, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.uxtw.nxv4i8" + )] + fn _svld1ub_gather_u32offset_u32( + pg: svbool4_t, + base: *const i8, + offsets: svint32_t, + ) -> nxv4i8; + } + crate::intrinsics::simd::simd_cast::( + _svld1ub_gather_u32offset_u32(pg.sve_into(), base.as_signed(), offsets.as_signed()) + .as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u32offset_u32( + pg: svbool_t, + base: *const u16, + offsets: svuint32_t, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.uxtw.nxv4i16" + )] + fn _svld1uh_gather_u32offset_u32( + pg: svbool4_t, + base: *const i16, + offsets: svint32_t, + ) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast::( + _svld1uh_gather_u32offset_u32(pg.sve_into(), base.as_signed(), offsets.as_signed()) + .as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[u64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u64offset_s64( + pg: svbool_t, + base: *const u8, + offsets: svuint64_t, +) -> svint64_t { + svld1ub_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u64offset_s64( + pg: svbool_t, + base: *const u16, + offsets: svuint64_t, +) -> svint64_t { + svld1uh_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed() +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[u64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_u64offset_s64( + pg: svbool_t, + base: *const u32, + offsets: svuint64_t, +) -> svint64_t { + svld1uw_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed() +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[u64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u64offset_u64( + pg: svbool_t, + base: *const u8, + offsets: svuint64_t, +) -> svuint64_t { + svld1ub_gather_s64offset_u64(pg, base, offsets.as_signed()) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u64offset_u64( + pg: svbool_t, + base: *const u16, + offsets: svuint64_t, +) -> svuint64_t { + svld1uh_gather_s64offset_u64(pg, base, offsets.as_signed()) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[u64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_u64offset_u64( + pg: svbool_t, + base: *const u32, + offsets: svuint64_t, +) -> svuint64_t { + svld1uw_gather_s64offset_u64(pg, base, offsets.as_signed()) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u32base]_offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + svld1ub_gather_u32base_offset_u32(pg, bases, offset).as_signed() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u32base]_offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + svld1uh_gather_u32base_offset_u32(pg, bases, offset).as_signed() +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u32base]_offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i8.nxv4i32" + )] + fn _svld1ub_gather_u32base_offset_u32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i8; + } + crate::intrinsics::simd::simd_cast::( + _svld1ub_gather_u32base_offset_u32(pg.sve_into(), bases.as_signed(), offset).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u32base]_offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i16.nxv4i32" + )] + fn _svld1uh_gather_u32base_offset_u32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast::( + _svld1uh_gather_u32base_offset_u32(pg.sve_into(), bases.as_signed(), offset).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + svld1ub_gather_u64base_offset_u64(pg, bases, offset).as_signed() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + svld1uh_gather_u64base_offset_u64(pg, bases, offset).as_signed() +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + svld1uw_gather_u64base_offset_u64(pg, bases, offset).as_signed() +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64" + )] + fn _svld1ub_gather_u64base_offset_u64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i8; + } + crate::intrinsics::simd::simd_cast::( + _svld1ub_gather_u64base_offset_u64(pg.sve_into(), bases.as_signed(), offset).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i16.nxv2i64" + )] + fn _svld1uh_gather_u64base_offset_u64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i16; + } + crate::intrinsics::simd::simd_cast::( + _svld1uh_gather_u64base_offset_u64(pg.sve_into(), bases.as_signed(), offset).as_unsigned(), + ) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i32.nxv2i64" + )] + fn _svld1uw_gather_u64base_offset_u64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i32; + } + crate::intrinsics::simd::simd_cast::( + _svld1uw_gather_u64base_offset_u64(pg.sve_into(), bases.as_signed(), offset).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u32base]_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svld1ub_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u32base]_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svld1uh_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u32base]_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svld1ub_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u32base]_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svld1uh_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svld1ub_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svld1uh_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svld1uw_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svld1ub_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svld1uh_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svld1uw_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_s16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_s16(pg: svbool_t, base: *const u8) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv8i8")] + fn _svld1ub_s16(pg: svbool8_t, base: *const i8) -> nxv8i8; + } + crate::intrinsics::simd::simd_cast::( + _svld1ub_s16(pg.sve_into(), base.as_signed()).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_s32(pg: svbool_t, base: *const u8) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4i8")] + fn _svld1ub_s32(pg: svbool4_t, base: *const i8) -> nxv4i8; + } + crate::intrinsics::simd::simd_cast::( + _svld1ub_s32(pg.sve_into(), base.as_signed()).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_s32(pg: svbool_t, base: *const u16) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4i16")] + fn _svld1uh_s32(pg: svbool4_t, base: *const i16) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast::( + _svld1uh_s32(pg.sve_into(), base.as_signed()).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_s64(pg: svbool_t, base: *const u8) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i8")] + fn _svld1ub_s64(pg: svbool2_t, base: *const i8) -> nxv2i8; + } + crate::intrinsics::simd::simd_cast::( + _svld1ub_s64(pg.sve_into(), base.as_signed()).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_s64(pg: svbool_t, base: *const u16) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i16")] + fn _svld1uh_s64(pg: svbool2_t, base: *const i16) -> nxv2i16; + } + crate::intrinsics::simd::simd_cast::( + _svld1uh_s64(pg.sve_into(), base.as_signed()).as_unsigned(), + ) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_s64(pg: svbool_t, base: *const u32) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i32")] + fn _svld1uw_s64(pg: svbool2_t, base: *const i32) -> nxv2i32; + } + crate::intrinsics::simd::simd_cast::( + _svld1uw_s64(pg.sve_into(), base.as_signed()).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_u16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_u16(pg: svbool_t, base: *const u8) -> svuint16_t { + svld1ub_s16(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_u32(pg: svbool_t, base: *const u8) -> svuint32_t { + svld1ub_s32(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_u32(pg: svbool_t, base: *const u16) -> svuint32_t { + svld1uh_s32(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_u64(pg: svbool_t, base: *const u8) -> svuint64_t { + svld1ub_s64(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_u64(pg: svbool_t, base: *const u16) -> svuint64_t { + svld1uh_s64(pg, base).as_unsigned() +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_u64(pg: svbool_t, base: *const u32) -> svuint64_t { + svld1uw_s64(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_vnum_s16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_vnum_s16(pg: svbool_t, base: *const u8, vnum: i64) -> svint16_t { + svld1ub_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_vnum_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_vnum_s32(pg: svbool_t, base: *const u8, vnum: i64) -> svint32_t { + svld1ub_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_vnum_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_vnum_s32(pg: svbool_t, base: *const u16, vnum: i64) -> svint32_t { + svld1uh_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_vnum_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_vnum_s64(pg: svbool_t, base: *const u8, vnum: i64) -> svint64_t { + svld1ub_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_vnum_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_vnum_s64(pg: svbool_t, base: *const u16, vnum: i64) -> svint64_t { + svld1uh_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_vnum_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_vnum_s64(pg: svbool_t, base: *const u32, vnum: i64) -> svint64_t { + svld1uw_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_vnum_u16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_vnum_u16(pg: svbool_t, base: *const u8, vnum: i64) -> svuint16_t { + svld1ub_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_vnum_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_vnum_u32(pg: svbool_t, base: *const u8, vnum: i64) -> svuint32_t { + svld1ub_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_vnum_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_vnum_u32(pg: svbool_t, base: *const u16, vnum: i64) -> svuint32_t { + svld1uh_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_vnum_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1b))] +pub unsafe fn svld1ub_vnum_u64(pg: svbool_t, base: *const u8, vnum: i64) -> svuint64_t { + svld1ub_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_vnum_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_vnum_u64(pg: svbool_t, base: *const u16, vnum: i64) -> svuint64_t { + svld1uh_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_vnum_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_vnum_u64(pg: svbool_t, base: *const u32, vnum: i64) -> svuint64_t { + svld1uw_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s32]index_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_s32index_s32( + pg: svbool_t, + base: *const u16, + indices: svint32_t, +) -> svint32_t { + svld1uh_gather_s32index_u32(pg, base, indices).as_signed() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s32]index_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_s32index_u32( + pg: svbool_t, + base: *const u16, + indices: svint32_t, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4i16" + )] + fn _svld1uh_gather_s32index_u32( + pg: svbool4_t, + base: *const i16, + indices: svint32_t, + ) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast::( + _svld1uh_gather_s32index_u32(pg.sve_into(), base.as_signed(), indices).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_s64index_s64( + pg: svbool_t, + base: *const u16, + indices: svint64_t, +) -> svint64_t { + svld1uh_gather_s64index_u64(pg, base, indices).as_signed() +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[s64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_s64index_s64( + pg: svbool_t, + base: *const u32, + indices: svint64_t, +) -> svint64_t { + svld1uw_gather_s64index_u64(pg, base, indices).as_signed() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_s64index_u64( + pg: svbool_t, + base: *const u16, + indices: svint64_t, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.index.nxv2i16" + )] + fn _svld1uh_gather_s64index_u64( + pg: svbool2_t, + base: *const i16, + indices: svint64_t, + ) -> nxv2i16; + } + crate::intrinsics::simd::simd_cast::( + _svld1uh_gather_s64index_u64(pg.sve_into(), base.as_signed(), indices).as_unsigned(), + ) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[s64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_s64index_u64( + pg: svbool_t, + base: *const u32, + indices: svint64_t, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.index.nxv2i32" + )] + fn _svld1uw_gather_s64index_u64( + pg: svbool2_t, + base: *const i32, + indices: svint64_t, + ) -> nxv2i32; + } + crate::intrinsics::simd::simd_cast::( + _svld1uw_gather_s64index_u64(pg.sve_into(), base.as_signed(), indices).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u32]index_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u32index_s32( + pg: svbool_t, + base: *const u16, + indices: svuint32_t, +) -> svint32_t { + svld1uh_gather_u32index_u32(pg, base, indices).as_signed() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u32]index_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u32index_u32( + pg: svbool_t, + base: *const u16, + indices: svuint32_t, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4i16" + )] + fn _svld1uh_gather_u32index_u32( + pg: svbool4_t, + base: *const i16, + indices: svint32_t, + ) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast::( + _svld1uh_gather_u32index_u32(pg.sve_into(), base.as_signed(), indices.as_signed()) + .as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u64index_s64( + pg: svbool_t, + base: *const u16, + indices: svuint64_t, +) -> svint64_t { + svld1uh_gather_s64index_u64(pg, base, indices.as_signed()).as_signed() +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[u64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_u64index_s64( + pg: svbool_t, + base: *const u32, + indices: svuint64_t, +) -> svint64_t { + svld1uw_gather_s64index_u64(pg, base, indices.as_signed()).as_signed() +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u64index_u64( + pg: svbool_t, + base: *const u16, + indices: svuint64_t, +) -> svuint64_t { + svld1uh_gather_s64index_u64(pg, base, indices.as_signed()) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[u64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_u64index_u64( + pg: svbool_t, + base: *const u32, + indices: svuint64_t, +) -> svuint64_t { + svld1uw_gather_s64index_u64(pg, base, indices.as_signed()) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u32base]_index_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svint32_t { + svld1uh_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u32base]_index_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svuint32_t { + svld1uh_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u64base]_index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svld1uh_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather[_u64base]_index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svld1uw_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Load 16-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u64base]_index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1h))] +pub unsafe fn svld1uh_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svld1uh_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and zero-extend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather[_u64base]_index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld1w))] +pub unsafe fn svld1uw_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svld1uw_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2w))] +pub unsafe fn svld2_f32(pg: svbool_t, base: *const f32) -> svfloat32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld2.sret.nxv4f32" + )] + fn _svld2_f32(pg: svbool4_t, base: *const f32) -> svfloat32x2_t; + } + _svld2_f32(pg.sve_into(), base) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2d))] +pub unsafe fn svld2_f64(pg: svbool_t, base: *const f64) -> svfloat64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld2.sret.nxv2f64" + )] + fn _svld2_f64(pg: svbool2_t, base: *const f64) -> svfloat64x2_t; + } + _svld2_f64(pg.sve_into(), base) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2b))] +pub unsafe fn svld2_s8(pg: svbool_t, base: *const i8) -> svint8x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld2.sret.nxv16i8" + )] + fn _svld2_s8(pg: svbool_t, base: *const i8) -> svint8x2_t; + } + _svld2_s8(pg, base) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2h))] +pub unsafe fn svld2_s16(pg: svbool_t, base: *const i16) -> svint16x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld2.sret.nxv8i16" + )] + fn _svld2_s16(pg: svbool8_t, base: *const i16) -> svint16x2_t; + } + _svld2_s16(pg.sve_into(), base) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2w))] +pub unsafe fn svld2_s32(pg: svbool_t, base: *const i32) -> svint32x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld2.sret.nxv4i32" + )] + fn _svld2_s32(pg: svbool4_t, base: *const i32) -> svint32x2_t; + } + _svld2_s32(pg.sve_into(), base) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2d))] +pub unsafe fn svld2_s64(pg: svbool_t, base: *const i64) -> svint64x2_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld2.sret.nxv2i64" + )] + fn _svld2_s64(pg: svbool2_t, base: *const i64) -> svint64x2_t; + } + _svld2_s64(pg.sve_into(), base) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2b))] +pub unsafe fn svld2_u8(pg: svbool_t, base: *const u8) -> svuint8x2_t { + svld2_s8(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2h))] +pub unsafe fn svld2_u16(pg: svbool_t, base: *const u16) -> svuint16x2_t { + svld2_s16(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2w))] +pub unsafe fn svld2_u32(pg: svbool_t, base: *const u32) -> svuint32x2_t { + svld2_s32(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2d))] +pub unsafe fn svld2_u64(pg: svbool_t, base: *const u64) -> svuint64x2_t { + svld2_s64(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2w))] +pub unsafe fn svld2_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svfloat32x2_t { + svld2_f32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2d))] +pub unsafe fn svld2_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svfloat64x2_t { + svld2_f64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2b))] +pub unsafe fn svld2_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint8x2_t { + svld2_s8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2h))] +pub unsafe fn svld2_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svint16x2_t { + svld2_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2w))] +pub unsafe fn svld2_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svint32x2_t { + svld2_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2d))] +pub unsafe fn svld2_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svint64x2_t { + svld2_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2b))] +pub unsafe fn svld2_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuint8x2_t { + svld2_u8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2h))] +pub unsafe fn svld2_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svuint16x2_t { + svld2_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2w))] +pub unsafe fn svld2_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svuint32x2_t { + svld2_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load two-element tuples into two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld2d))] +pub unsafe fn svld2_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> svuint64x2_t { + svld2_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3w))] +pub unsafe fn svld3_f32(pg: svbool_t, base: *const f32) -> svfloat32x3_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld3.sret.nxv4f32" + )] + fn _svld3_f32(pg: svbool4_t, base: *const f32) -> svfloat32x3_t; + } + _svld3_f32(pg.sve_into(), base) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3d))] +pub unsafe fn svld3_f64(pg: svbool_t, base: *const f64) -> svfloat64x3_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld3.sret.nxv2f64" + )] + fn _svld3_f64(pg: svbool2_t, base: *const f64) -> svfloat64x3_t; + } + _svld3_f64(pg.sve_into(), base) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3b))] +pub unsafe fn svld3_s8(pg: svbool_t, base: *const i8) -> svint8x3_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld3.sret.nxv16i8" + )] + fn _svld3_s8(pg: svbool_t, base: *const i8) -> svint8x3_t; + } + _svld3_s8(pg, base) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3h))] +pub unsafe fn svld3_s16(pg: svbool_t, base: *const i16) -> svint16x3_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld3.sret.nxv8i16" + )] + fn _svld3_s16(pg: svbool8_t, base: *const i16) -> svint16x3_t; + } + _svld3_s16(pg.sve_into(), base) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3w))] +pub unsafe fn svld3_s32(pg: svbool_t, base: *const i32) -> svint32x3_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld3.sret.nxv4i32" + )] + fn _svld3_s32(pg: svbool4_t, base: *const i32) -> svint32x3_t; + } + _svld3_s32(pg.sve_into(), base) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3d))] +pub unsafe fn svld3_s64(pg: svbool_t, base: *const i64) -> svint64x3_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld3.sret.nxv2i64" + )] + fn _svld3_s64(pg: svbool2_t, base: *const i64) -> svint64x3_t; + } + _svld3_s64(pg.sve_into(), base) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3b))] +pub unsafe fn svld3_u8(pg: svbool_t, base: *const u8) -> svuint8x3_t { + svld3_s8(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3h))] +pub unsafe fn svld3_u16(pg: svbool_t, base: *const u16) -> svuint16x3_t { + svld3_s16(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3w))] +pub unsafe fn svld3_u32(pg: svbool_t, base: *const u32) -> svuint32x3_t { + svld3_s32(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3d))] +pub unsafe fn svld3_u64(pg: svbool_t, base: *const u64) -> svuint64x3_t { + svld3_s64(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3w))] +pub unsafe fn svld3_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svfloat32x3_t { + svld3_f32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3d))] +pub unsafe fn svld3_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svfloat64x3_t { + svld3_f64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3b))] +pub unsafe fn svld3_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint8x3_t { + svld3_s8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3h))] +pub unsafe fn svld3_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svint16x3_t { + svld3_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3w))] +pub unsafe fn svld3_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svint32x3_t { + svld3_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3d))] +pub unsafe fn svld3_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svint64x3_t { + svld3_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3b))] +pub unsafe fn svld3_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuint8x3_t { + svld3_u8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3h))] +pub unsafe fn svld3_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svuint16x3_t { + svld3_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3w))] +pub unsafe fn svld3_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svuint32x3_t { + svld3_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load three-element tuples into three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld3d))] +pub unsafe fn svld3_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> svuint64x3_t { + svld3_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4w))] +pub unsafe fn svld4_f32(pg: svbool_t, base: *const f32) -> svfloat32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld4.sret.nxv4f32" + )] + fn _svld4_f32(pg: svbool4_t, base: *const f32) -> svfloat32x4_t; + } + _svld4_f32(pg.sve_into(), base) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4d))] +pub unsafe fn svld4_f64(pg: svbool_t, base: *const f64) -> svfloat64x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld4.sret.nxv2f64" + )] + fn _svld4_f64(pg: svbool2_t, base: *const f64) -> svfloat64x4_t; + } + _svld4_f64(pg.sve_into(), base) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4b))] +pub unsafe fn svld4_s8(pg: svbool_t, base: *const i8) -> svint8x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld4.sret.nxv16i8" + )] + fn _svld4_s8(pg: svbool_t, base: *const i8) -> svint8x4_t; + } + _svld4_s8(pg, base) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4h))] +pub unsafe fn svld4_s16(pg: svbool_t, base: *const i16) -> svint16x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld4.sret.nxv8i16" + )] + fn _svld4_s16(pg: svbool8_t, base: *const i16) -> svint16x4_t; + } + _svld4_s16(pg.sve_into(), base) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4w))] +pub unsafe fn svld4_s32(pg: svbool_t, base: *const i32) -> svint32x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld4.sret.nxv4i32" + )] + fn _svld4_s32(pg: svbool4_t, base: *const i32) -> svint32x4_t; + } + _svld4_s32(pg.sve_into(), base) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4d))] +pub unsafe fn svld4_s64(pg: svbool_t, base: *const i64) -> svint64x4_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ld4.sret.nxv2i64" + )] + fn _svld4_s64(pg: svbool2_t, base: *const i64) -> svint64x4_t; + } + _svld4_s64(pg.sve_into(), base) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4b))] +pub unsafe fn svld4_u8(pg: svbool_t, base: *const u8) -> svuint8x4_t { + svld4_s8(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4h))] +pub unsafe fn svld4_u16(pg: svbool_t, base: *const u16) -> svuint16x4_t { + svld4_s16(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4w))] +pub unsafe fn svld4_u32(pg: svbool_t, base: *const u32) -> svuint32x4_t { + svld4_s32(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4d))] +pub unsafe fn svld4_u64(pg: svbool_t, base: *const u64) -> svuint64x4_t { + svld4_s64(pg, base.as_signed()).as_unsigned() +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4w))] +pub unsafe fn svld4_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svfloat32x4_t { + svld4_f32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4d))] +pub unsafe fn svld4_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svfloat64x4_t { + svld4_f64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4b))] +pub unsafe fn svld4_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint8x4_t { + svld4_s8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4h))] +pub unsafe fn svld4_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svint16x4_t { + svld4_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4w))] +pub unsafe fn svld4_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svint32x4_t { + svld4_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4d))] +pub unsafe fn svld4_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svint64x4_t { + svld4_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4b))] +pub unsafe fn svld4_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuint8x4_t { + svld4_u8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4h))] +pub unsafe fn svld4_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svuint16x4_t { + svld4_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4w))] +pub unsafe fn svld4_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svuint32x4_t { + svld4_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load four-element tuples into four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ld4d))] +pub unsafe fn svld4_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> svuint64x4_t { + svld4_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_f32(pg: svbool_t, base: *const f32) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv4f32")] + fn _svldff1_f32(pg: svbool4_t, base: *const f32) -> svfloat32_t; + } + _svldff1_f32(pg.sve_into(), base) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_f64(pg: svbool_t, base: *const f64) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2f64")] + fn _svldff1_f64(pg: svbool2_t, base: *const f64) -> svfloat64_t; + } + _svldff1_f64(pg.sve_into(), base) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1_s8(pg: svbool_t, base: *const i8) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv16i8")] + fn _svldff1_s8(pg: svbool_t, base: *const i8) -> svint8_t; + } + _svldff1_s8(pg, base) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1_s16(pg: svbool_t, base: *const i16) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv8i16")] + fn _svldff1_s16(pg: svbool8_t, base: *const i16) -> svint16_t; + } + _svldff1_s16(pg.sve_into(), base) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_s32(pg: svbool_t, base: *const i32) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv4i32")] + fn _svldff1_s32(pg: svbool4_t, base: *const i32) -> svint32_t; + } + _svldff1_s32(pg.sve_into(), base) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_s64(pg: svbool_t, base: *const i64) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2i64")] + fn _svldff1_s64(pg: svbool2_t, base: *const i64) -> svint64_t; + } + _svldff1_s64(pg.sve_into(), base) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1_u8(pg: svbool_t, base: *const u8) -> svuint8_t { + svldff1_s8(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1_u16(pg: svbool_t, base: *const u16) -> svuint16_t { + svldff1_s16(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_u32(pg: svbool_t, base: *const u32) -> svuint32_t { + svldff1_s32(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_u64(pg: svbool_t, base: *const u64) -> svuint64_t { + svldff1_s64(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s32]index[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_s32index_f32( + pg: svbool_t, + base: *const f32, + indices: svint32_t, +) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv4f32" + )] + fn _svldff1_gather_s32index_f32( + pg: svbool4_t, + base: *const f32, + indices: svint32_t, + ) -> svfloat32_t; + } + _svldff1_gather_s32index_f32(pg.sve_into(), base, indices) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s32]index[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_s32index_s32( + pg: svbool_t, + base: *const i32, + indices: svint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv4i32" + )] + fn _svldff1_gather_s32index_s32( + pg: svbool4_t, + base: *const i32, + indices: svint32_t, + ) -> svint32_t; + } + _svldff1_gather_s32index_s32(pg.sve_into(), base, indices) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s32]index[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_s32index_u32( + pg: svbool_t, + base: *const u32, + indices: svint32_t, +) -> svuint32_t { + svldff1_gather_s32index_s32(pg, base.as_signed(), indices).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s64]index[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_s64index_f64( + pg: svbool_t, + base: *const f64, + indices: svint64_t, +) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.index.nxv2f64" + )] + fn _svldff1_gather_s64index_f64( + pg: svbool2_t, + base: *const f64, + indices: svint64_t, + ) -> svfloat64_t; + } + _svldff1_gather_s64index_f64(pg.sve_into(), base, indices) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s64]index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_s64index_s64( + pg: svbool_t, + base: *const i64, + indices: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.index.nxv2i64" + )] + fn _svldff1_gather_s64index_s64( + pg: svbool2_t, + base: *const i64, + indices: svint64_t, + ) -> svint64_t; + } + _svldff1_gather_s64index_s64(pg.sve_into(), base, indices) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s64]index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_s64index_u64( + pg: svbool_t, + base: *const u64, + indices: svint64_t, +) -> svuint64_t { + svldff1_gather_s64index_s64(pg, base.as_signed(), indices).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u32]index[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32index_f32( + pg: svbool_t, + base: *const f32, + indices: svuint32_t, +) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv4f32" + )] + fn _svldff1_gather_u32index_f32( + pg: svbool4_t, + base: *const f32, + indices: svint32_t, + ) -> svfloat32_t; + } + _svldff1_gather_u32index_f32(pg.sve_into(), base, indices.as_signed()) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u32]index[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32index_s32( + pg: svbool_t, + base: *const i32, + indices: svuint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv4i32" + )] + fn _svldff1_gather_u32index_s32( + pg: svbool4_t, + base: *const i32, + indices: svint32_t, + ) -> svint32_t; + } + _svldff1_gather_u32index_s32(pg.sve_into(), base, indices.as_signed()) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u32]index[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32index_u32( + pg: svbool_t, + base: *const u32, + indices: svuint32_t, +) -> svuint32_t { + svldff1_gather_u32index_s32(pg, base.as_signed(), indices).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u64]index[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64index_f64( + pg: svbool_t, + base: *const f64, + indices: svuint64_t, +) -> svfloat64_t { + svldff1_gather_s64index_f64(pg, base, indices.as_signed()) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u64]index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64index_s64( + pg: svbool_t, + base: *const i64, + indices: svuint64_t, +) -> svint64_t { + svldff1_gather_s64index_s64(pg, base, indices.as_signed()) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u64]index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64index_u64( + pg: svbool_t, + base: *const u64, + indices: svuint64_t, +) -> svuint64_t { + svldff1_gather_s64index_s64(pg, base.as_signed(), indices.as_signed()).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s32]offset[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_s32offset_f32( + pg: svbool_t, + base: *const f32, + offsets: svint32_t, +) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.nxv4f32" + )] + fn _svldff1_gather_s32offset_f32( + pg: svbool4_t, + base: *const f32, + offsets: svint32_t, + ) -> svfloat32_t; + } + _svldff1_gather_s32offset_f32(pg.sve_into(), base, offsets) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s32]offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_s32offset_s32( + pg: svbool_t, + base: *const i32, + offsets: svint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.nxv4i32" + )] + fn _svldff1_gather_s32offset_s32( + pg: svbool4_t, + base: *const i32, + offsets: svint32_t, + ) -> svint32_t; + } + _svldff1_gather_s32offset_s32(pg.sve_into(), base, offsets) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s32]offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_s32offset_u32( + pg: svbool_t, + base: *const u32, + offsets: svint32_t, +) -> svuint32_t { + svldff1_gather_s32offset_s32(pg, base.as_signed(), offsets).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s64]offset[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_s64offset_f64( + pg: svbool_t, + base: *const f64, + offsets: svint64_t, +) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.nxv2f64" + )] + fn _svldff1_gather_s64offset_f64( + pg: svbool2_t, + base: *const f64, + offsets: svint64_t, + ) -> svfloat64_t; + } + _svldff1_gather_s64offset_f64(pg.sve_into(), base, offsets) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_s64offset_s64( + pg: svbool_t, + base: *const i64, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.nxv2i64" + )] + fn _svldff1_gather_s64offset_s64( + pg: svbool2_t, + base: *const i64, + offsets: svint64_t, + ) -> svint64_t; + } + _svldff1_gather_s64offset_s64(pg.sve_into(), base, offsets) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_s64offset_u64( + pg: svbool_t, + base: *const u64, + offsets: svint64_t, +) -> svuint64_t { + svldff1_gather_s64offset_s64(pg, base.as_signed(), offsets).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u32]offset[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32offset_f32( + pg: svbool_t, + base: *const f32, + offsets: svuint32_t, +) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.nxv4f32" + )] + fn _svldff1_gather_u32offset_f32( + pg: svbool4_t, + base: *const f32, + offsets: svint32_t, + ) -> svfloat32_t; + } + _svldff1_gather_u32offset_f32(pg.sve_into(), base, offsets.as_signed()) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u32]offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32offset_s32( + pg: svbool_t, + base: *const i32, + offsets: svuint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i32" + )] + fn _svldff1_gather_u32offset_s32( + pg: svbool4_t, + base: *const i32, + offsets: svint32_t, + ) -> svint32_t; + } + _svldff1_gather_u32offset_s32(pg.sve_into(), base, offsets.as_signed()) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u32]offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32offset_u32( + pg: svbool_t, + base: *const u32, + offsets: svuint32_t, +) -> svuint32_t { + svldff1_gather_u32offset_s32(pg, base.as_signed(), offsets).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u64]offset[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64offset_f64( + pg: svbool_t, + base: *const f64, + offsets: svuint64_t, +) -> svfloat64_t { + svldff1_gather_s64offset_f64(pg, base, offsets.as_signed()) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64offset_s64( + pg: svbool_t, + base: *const i64, + offsets: svuint64_t, +) -> svint64_t { + svldff1_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64offset_u64( + pg: svbool_t, + base: *const u64, + offsets: svuint64_t, +) -> svuint64_t { + svldff1_gather_s64offset_s64(pg, base.as_signed(), offsets.as_signed()).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_f32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32base_f32(pg: svbool_t, bases: svuint32_t) -> svfloat32_t { + svldff1_gather_u32base_offset_f32(pg, bases, 0) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svldff1_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svldff1_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_f64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64base_f64(pg: svbool_t, bases: svuint64_t) -> svfloat64_t { + svldff1_gather_u64base_offset_f64(pg, bases, 0) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldff1_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldff1_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_index_f32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32base_index_f32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svfloat32_t { + svldff1_gather_u32base_offset_f32(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_index_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svint32_t { + svldff1_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_index_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svuint32_t { + svldff1_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_index_f64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64base_index_f64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svfloat64_t { + svldff1_gather_u64base_offset_f64(pg, bases, index.unchecked_shl(3)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svldff1_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(3)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svldff1_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(3)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_offset_f32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32base_offset_f32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4f32.nxv4i32" + )] + fn _svldff1_gather_u32base_offset_f32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> svfloat32_t; + } + _svldff1_gather_u32base_offset_f32(pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4i32.nxv4i32" + )] + fn _svldff1_gather_u32base_offset_s32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> svint32_t; + } + _svldff1_gather_u32base_offset_s32(pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + svldff1_gather_u32base_offset_s32(pg, bases, offset).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_offset_f64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64base_offset_f64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2f64.nxv2i64" + )] + fn _svldff1_gather_u64base_offset_f64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> svfloat64_t; + } + _svldff1_gather_u64base_offset_f64(pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i64.nxv2i64" + )] + fn _svldff1_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> svint64_t; + } + _svldff1_gather_u64base_offset_s64(pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svldff1_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svfloat32_t { + svldff1_f32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svfloat64_t { + svldff1_f64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint8_t { + svldff1_s8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svint16_t { + svldff1_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svint32_t { + svldff1_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svint64_t { + svldff1_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuint8_t { + svldff1_u8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svuint16_t { + svldff1_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svuint32_t { + svldff1_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1d))] +pub unsafe fn svldff1_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> svuint64_t { + svldff1_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[s32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_s32offset_s32( + pg: svbool_t, + base: *const i8, + offsets: svint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.nxv4i8" + )] + fn _svldff1sb_gather_s32offset_s32( + pg: svbool4_t, + base: *const i8, + offsets: svint32_t, + ) -> nxv4i8; + } + crate::intrinsics::simd::simd_cast(_svldff1sb_gather_s32offset_s32( + pg.sve_into(), + base, + offsets, + )) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_s32offset_s32( + pg: svbool_t, + base: *const i16, + offsets: svint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.nxv4i16" + )] + fn _svldff1sh_gather_s32offset_s32( + pg: svbool4_t, + base: *const i16, + offsets: svint32_t, + ) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast(_svldff1sh_gather_s32offset_s32( + pg.sve_into(), + base, + offsets, + )) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[s32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_s32offset_u32( + pg: svbool_t, + base: *const i8, + offsets: svint32_t, +) -> svuint32_t { + svldff1sb_gather_s32offset_s32(pg, base, offsets).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_s32offset_u32( + pg: svbool_t, + base: *const i16, + offsets: svint32_t, +) -> svuint32_t { + svldff1sh_gather_s32offset_s32(pg, base, offsets).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[s64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_s64offset_s64( + pg: svbool_t, + base: *const i8, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.nxv2i8" + )] + fn _svldff1sb_gather_s64offset_s64( + pg: svbool2_t, + base: *const i8, + offsets: svint64_t, + ) -> nxv2i8; + } + crate::intrinsics::simd::simd_cast(_svldff1sb_gather_s64offset_s64( + pg.sve_into(), + base, + offsets, + )) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_s64offset_s64( + pg: svbool_t, + base: *const i16, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.nxv2i16" + )] + fn _svldff1sh_gather_s64offset_s64( + pg: svbool2_t, + base: *const i16, + offsets: svint64_t, + ) -> nxv2i16; + } + crate::intrinsics::simd::simd_cast(_svldff1sh_gather_s64offset_s64( + pg.sve_into(), + base, + offsets, + )) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[s64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_s64offset_s64( + pg: svbool_t, + base: *const i32, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.nxv2i32" + )] + fn _svldff1sw_gather_s64offset_s64( + pg: svbool2_t, + base: *const i32, + offsets: svint64_t, + ) -> nxv2i32; + } + crate::intrinsics::simd::simd_cast(_svldff1sw_gather_s64offset_s64( + pg.sve_into(), + base, + offsets, + )) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[s64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_s64offset_u64( + pg: svbool_t, + base: *const i8, + offsets: svint64_t, +) -> svuint64_t { + svldff1sb_gather_s64offset_s64(pg, base, offsets).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_s64offset_u64( + pg: svbool_t, + base: *const i16, + offsets: svint64_t, +) -> svuint64_t { + svldff1sh_gather_s64offset_s64(pg, base, offsets).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[s64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_s64offset_u64( + pg: svbool_t, + base: *const i32, + offsets: svint64_t, +) -> svuint64_t { + svldff1sw_gather_s64offset_s64(pg, base, offsets).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[u32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u32offset_s32( + pg: svbool_t, + base: *const i8, + offsets: svuint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i8" + )] + fn _svldff1sb_gather_u32offset_s32( + pg: svbool4_t, + base: *const i8, + offsets: svint32_t, + ) -> nxv4i8; + } + crate::intrinsics::simd::simd_cast(_svldff1sb_gather_u32offset_s32( + pg.sve_into(), + base, + offsets.as_signed(), + )) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u32offset_s32( + pg: svbool_t, + base: *const i16, + offsets: svuint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i16" + )] + fn _svldff1sh_gather_u32offset_s32( + pg: svbool4_t, + base: *const i16, + offsets: svint32_t, + ) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast(_svldff1sh_gather_u32offset_s32( + pg.sve_into(), + base, + offsets.as_signed(), + )) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[u32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u32offset_u32( + pg: svbool_t, + base: *const i8, + offsets: svuint32_t, +) -> svuint32_t { + svldff1sb_gather_u32offset_s32(pg, base, offsets).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u32offset_u32( + pg: svbool_t, + base: *const i16, + offsets: svuint32_t, +) -> svuint32_t { + svldff1sh_gather_u32offset_s32(pg, base, offsets).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[u64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u64offset_s64( + pg: svbool_t, + base: *const i8, + offsets: svuint64_t, +) -> svint64_t { + svldff1sb_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u64offset_s64( + pg: svbool_t, + base: *const i16, + offsets: svuint64_t, +) -> svint64_t { + svldff1sh_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[u64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_u64offset_s64( + pg: svbool_t, + base: *const i32, + offsets: svuint64_t, +) -> svint64_t { + svldff1sw_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[u64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u64offset_u64( + pg: svbool_t, + base: *const i8, + offsets: svuint64_t, +) -> svuint64_t { + svldff1sb_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u64offset_u64( + pg: svbool_t, + base: *const i16, + offsets: svuint64_t, +) -> svuint64_t { + svldff1sh_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[u64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_u64offset_u64( + pg: svbool_t, + base: *const i32, + offsets: svuint64_t, +) -> svuint64_t { + svldff1sw_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u32base]_offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4i8.nxv4i32" + )] + fn _svldff1sb_gather_u32base_offset_s32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i8; + } + crate::intrinsics::simd::simd_cast(_svldff1sb_gather_u32base_offset_s32( + pg.sve_into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u32base]_offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4i16.nxv4i32" + )] + fn _svldff1sh_gather_u32base_offset_s32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast(_svldff1sh_gather_u32base_offset_s32( + pg.sve_into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u32base]_offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + svldff1sb_gather_u32base_offset_s32(pg, bases, offset).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u32base]_offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + svldff1sh_gather_u32base_offset_s32(pg, bases, offset).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i8.nxv2i64" + )] + fn _svldff1sb_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i8; + } + crate::intrinsics::simd::simd_cast(_svldff1sb_gather_u64base_offset_s64( + pg.sve_into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i16.nxv2i64" + )] + fn _svldff1sh_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i16; + } + crate::intrinsics::simd::simd_cast(_svldff1sh_gather_u64base_offset_s64( + pg.sve_into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i32.nxv2i64" + )] + fn _svldff1sw_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i32; + } + crate::intrinsics::simd::simd_cast(_svldff1sw_gather_u64base_offset_s64( + pg.sve_into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svldff1sb_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svldff1sh_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svldff1sw_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u32base]_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svldff1sb_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u32base]_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svldff1sh_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u32base]_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svldff1sb_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u32base]_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svldff1sh_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldff1sb_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldff1sh_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldff1sw_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldff1sb_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldff1sh_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldff1sw_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_s16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_s16(pg: svbool_t, base: *const i8) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv8i8")] + fn _svldff1sb_s16(pg: svbool8_t, base: *const i8) -> nxv8i8; + } + crate::intrinsics::simd::simd_cast(_svldff1sb_s16(pg.sve_into(), base)) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_s32(pg: svbool_t, base: *const i8) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv4i8")] + fn _svldff1sb_s32(pg: svbool4_t, base: *const i8) -> nxv4i8; + } + crate::intrinsics::simd::simd_cast(_svldff1sb_s32(pg.sve_into(), base)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_s32(pg: svbool_t, base: *const i16) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv4i16")] + fn _svldff1sh_s32(pg: svbool4_t, base: *const i16) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast(_svldff1sh_s32(pg.sve_into(), base)) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_s64(pg: svbool_t, base: *const i8) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2i8")] + fn _svldff1sb_s64(pg: svbool2_t, base: *const i8) -> nxv2i8; + } + crate::intrinsics::simd::simd_cast(_svldff1sb_s64(pg.sve_into(), base)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_s64(pg: svbool_t, base: *const i16) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2i16")] + fn _svldff1sh_s64(pg: svbool2_t, base: *const i16) -> nxv2i16; + } + crate::intrinsics::simd::simd_cast(_svldff1sh_s64(pg.sve_into(), base)) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_s64(pg: svbool_t, base: *const i32) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2i32")] + fn _svldff1sw_s64(pg: svbool2_t, base: *const i32) -> nxv2i32; + } + crate::intrinsics::simd::simd_cast(_svldff1sw_s64(pg.sve_into(), base)) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_u16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_u16(pg: svbool_t, base: *const i8) -> svuint16_t { + svldff1sb_s16(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_u32(pg: svbool_t, base: *const i8) -> svuint32_t { + svldff1sb_s32(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_u32(pg: svbool_t, base: *const i16) -> svuint32_t { + svldff1sh_s32(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_u64(pg: svbool_t, base: *const i8) -> svuint64_t { + svldff1sb_s64(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_u64(pg: svbool_t, base: *const i16) -> svuint64_t { + svldff1sh_s64(pg, base).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_u64(pg: svbool_t, base: *const i32) -> svuint64_t { + svldff1sw_s64(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_vnum_s16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_vnum_s16(pg: svbool_t, base: *const i8, vnum: i64) -> svint16_t { + svldff1sb_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_vnum_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_vnum_s32(pg: svbool_t, base: *const i8, vnum: i64) -> svint32_t { + svldff1sb_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_vnum_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_vnum_s32(pg: svbool_t, base: *const i16, vnum: i64) -> svint32_t { + svldff1sh_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_vnum_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_vnum_s64(pg: svbool_t, base: *const i8, vnum: i64) -> svint64_t { + svldff1sb_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_vnum_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_vnum_s64(pg: svbool_t, base: *const i16, vnum: i64) -> svint64_t { + svldff1sh_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_vnum_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_vnum_s64(pg: svbool_t, base: *const i32, vnum: i64) -> svint64_t { + svldff1sw_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_vnum_u16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_vnum_u16(pg: svbool_t, base: *const i8, vnum: i64) -> svuint16_t { + svldff1sb_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_vnum_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_vnum_u32(pg: svbool_t, base: *const i8, vnum: i64) -> svuint32_t { + svldff1sb_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_vnum_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_vnum_u32(pg: svbool_t, base: *const i16, vnum: i64) -> svuint32_t { + svldff1sh_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_vnum_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sb))] +pub unsafe fn svldff1sb_vnum_u64(pg: svbool_t, base: *const i8, vnum: i64) -> svuint64_t { + svldff1sb_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_vnum_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_vnum_u64(pg: svbool_t, base: *const i16, vnum: i64) -> svuint64_t { + svldff1sh_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_vnum_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_vnum_u64(pg: svbool_t, base: *const i32, vnum: i64) -> svuint64_t { + svldff1sw_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s32]index_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_s32index_s32( + pg: svbool_t, + base: *const i16, + indices: svint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv4i16" + )] + fn _svldff1sh_gather_s32index_s32( + pg: svbool4_t, + base: *const i16, + indices: svint32_t, + ) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast(_svldff1sh_gather_s32index_s32(pg.sve_into(), base, indices)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s32]index_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_s32index_u32( + pg: svbool_t, + base: *const i16, + indices: svint32_t, +) -> svuint32_t { + svldff1sh_gather_s32index_s32(pg, base, indices).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_s64index_s64( + pg: svbool_t, + base: *const i16, + indices: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.index.nxv2i16" + )] + fn _svldff1sh_gather_s64index_s64( + pg: svbool2_t, + base: *const i16, + indices: svint64_t, + ) -> nxv2i16; + } + crate::intrinsics::simd::simd_cast(_svldff1sh_gather_s64index_s64(pg.sve_into(), base, indices)) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[s64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_s64index_s64( + pg: svbool_t, + base: *const i32, + indices: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.index.nxv2i32" + )] + fn _svldff1sw_gather_s64index_s64( + pg: svbool2_t, + base: *const i32, + indices: svint64_t, + ) -> nxv2i32; + } + crate::intrinsics::simd::simd_cast(_svldff1sw_gather_s64index_s64(pg.sve_into(), base, indices)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_s64index_u64( + pg: svbool_t, + base: *const i16, + indices: svint64_t, +) -> svuint64_t { + svldff1sh_gather_s64index_s64(pg, base, indices).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[s64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_s64index_u64( + pg: svbool_t, + base: *const i32, + indices: svint64_t, +) -> svuint64_t { + svldff1sw_gather_s64index_s64(pg, base, indices).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u32]index_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u32index_s32( + pg: svbool_t, + base: *const i16, + indices: svuint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv4i16" + )] + fn _svldff1sh_gather_u32index_s32( + pg: svbool4_t, + base: *const i16, + indices: svint32_t, + ) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast(_svldff1sh_gather_u32index_s32( + pg.sve_into(), + base, + indices.as_signed(), + )) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u32]index_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u32index_u32( + pg: svbool_t, + base: *const i16, + indices: svuint32_t, +) -> svuint32_t { + svldff1sh_gather_u32index_s32(pg, base, indices).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u64index_s64( + pg: svbool_t, + base: *const i16, + indices: svuint64_t, +) -> svint64_t { + svldff1sh_gather_s64index_s64(pg, base, indices.as_signed()) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[u64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_u64index_s64( + pg: svbool_t, + base: *const i32, + indices: svuint64_t, +) -> svint64_t { + svldff1sw_gather_s64index_s64(pg, base, indices.as_signed()) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u64index_u64( + pg: svbool_t, + base: *const i16, + indices: svuint64_t, +) -> svuint64_t { + svldff1sh_gather_s64index_s64(pg, base, indices.as_signed()).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[u64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_u64index_u64( + pg: svbool_t, + base: *const i32, + indices: svuint64_t, +) -> svuint64_t { + svldff1sw_gather_s64index_s64(pg, base, indices.as_signed()).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u32base]_index_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svint32_t { + svldff1sh_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u32base]_index_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svuint32_t { + svldff1sh_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u64base]_index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svldff1sh_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather[_u64base]_index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svldff1sw_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Load 16-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u64base]_index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sh))] +pub unsafe fn svldff1sh_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svldff1sh_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and sign-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather[_u64base]_index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1sw))] +pub unsafe fn svldff1sw_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svldff1sw_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[s32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_s32offset_s32( + pg: svbool_t, + base: *const u8, + offsets: svint32_t, +) -> svint32_t { + svldff1ub_gather_s32offset_u32(pg, base, offsets).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_s32offset_s32( + pg: svbool_t, + base: *const u16, + offsets: svint32_t, +) -> svint32_t { + svldff1uh_gather_s32offset_u32(pg, base, offsets).as_signed() +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[s32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_s32offset_u32( + pg: svbool_t, + base: *const u8, + offsets: svint32_t, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.nxv4i8" + )] + fn _svldff1ub_gather_s32offset_u32( + pg: svbool4_t, + base: *const i8, + offsets: svint32_t, + ) -> nxv4i8; + } + crate::intrinsics::simd::simd_cast::( + _svldff1ub_gather_s32offset_u32(pg.sve_into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_s32offset_u32( + pg: svbool_t, + base: *const u16, + offsets: svint32_t, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.nxv4i16" + )] + fn _svldff1uh_gather_s32offset_u32( + pg: svbool4_t, + base: *const i16, + offsets: svint32_t, + ) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast::( + _svldff1uh_gather_s32offset_u32(pg.sve_into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[s64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_s64offset_s64( + pg: svbool_t, + base: *const u8, + offsets: svint64_t, +) -> svint64_t { + svldff1ub_gather_s64offset_u64(pg, base, offsets).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_s64offset_s64( + pg: svbool_t, + base: *const u16, + offsets: svint64_t, +) -> svint64_t { + svldff1uh_gather_s64offset_u64(pg, base, offsets).as_signed() +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[s64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_s64offset_s64( + pg: svbool_t, + base: *const u32, + offsets: svint64_t, +) -> svint64_t { + svldff1uw_gather_s64offset_u64(pg, base, offsets).as_signed() +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[s64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_s64offset_u64( + pg: svbool_t, + base: *const u8, + offsets: svint64_t, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.nxv2i8" + )] + fn _svldff1ub_gather_s64offset_u64( + pg: svbool2_t, + base: *const i8, + offsets: svint64_t, + ) -> nxv2i8; + } + crate::intrinsics::simd::simd_cast::( + _svldff1ub_gather_s64offset_u64(pg.sve_into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_s64offset_u64( + pg: svbool_t, + base: *const u16, + offsets: svint64_t, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.nxv2i16" + )] + fn _svldff1uh_gather_s64offset_u64( + pg: svbool2_t, + base: *const i16, + offsets: svint64_t, + ) -> nxv2i16; + } + crate::intrinsics::simd::simd_cast::( + _svldff1uh_gather_s64offset_u64(pg.sve_into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[s64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_s64offset_u64( + pg: svbool_t, + base: *const u32, + offsets: svint64_t, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.nxv2i32" + )] + fn _svldff1uw_gather_s64offset_u64( + pg: svbool2_t, + base: *const i32, + offsets: svint64_t, + ) -> nxv2i32; + } + crate::intrinsics::simd::simd_cast::( + _svldff1uw_gather_s64offset_u64(pg.sve_into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[u32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_u32offset_s32( + pg: svbool_t, + base: *const u8, + offsets: svuint32_t, +) -> svint32_t { + svldff1ub_gather_u32offset_u32(pg, base, offsets).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u32offset_s32( + pg: svbool_t, + base: *const u16, + offsets: svuint32_t, +) -> svint32_t { + svldff1uh_gather_u32offset_u32(pg, base, offsets).as_signed() +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[u32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_u32offset_u32( + pg: svbool_t, + base: *const u8, + offsets: svuint32_t, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i8" + )] + fn _svldff1ub_gather_u32offset_u32( + pg: svbool4_t, + base: *const i8, + offsets: svint32_t, + ) -> nxv4i8; + } + crate::intrinsics::simd::simd_cast::( + _svldff1ub_gather_u32offset_u32(pg.sve_into(), base.as_signed(), offsets.as_signed()) + .as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u32offset_u32( + pg: svbool_t, + base: *const u16, + offsets: svuint32_t, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i16" + )] + fn _svldff1uh_gather_u32offset_u32( + pg: svbool4_t, + base: *const i16, + offsets: svint32_t, + ) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast::( + _svldff1uh_gather_u32offset_u32(pg.sve_into(), base.as_signed(), offsets.as_signed()) + .as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[u64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_u64offset_s64( + pg: svbool_t, + base: *const u8, + offsets: svuint64_t, +) -> svint64_t { + svldff1ub_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u64offset_s64( + pg: svbool_t, + base: *const u16, + offsets: svuint64_t, +) -> svint64_t { + svldff1uh_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed() +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[u64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_u64offset_s64( + pg: svbool_t, + base: *const u32, + offsets: svuint64_t, +) -> svint64_t { + svldff1uw_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed() +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[u64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_u64offset_u64( + pg: svbool_t, + base: *const u8, + offsets: svuint64_t, +) -> svuint64_t { + svldff1ub_gather_s64offset_u64(pg, base, offsets.as_signed()) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u64offset_u64( + pg: svbool_t, + base: *const u16, + offsets: svuint64_t, +) -> svuint64_t { + svldff1uh_gather_s64offset_u64(pg, base, offsets.as_signed()) +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[u64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_u64offset_u64( + pg: svbool_t, + base: *const u32, + offsets: svuint64_t, +) -> svuint64_t { + svldff1uw_gather_s64offset_u64(pg, base, offsets.as_signed()) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u32base]_offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + svldff1ub_gather_u32base_offset_u32(pg, bases, offset).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u32base]_offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + svldff1uh_gather_u32base_offset_u32(pg, bases, offset).as_signed() +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u32base]_offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4i8.nxv4i32" + )] + fn _svldff1ub_gather_u32base_offset_u32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i8; + } + crate::intrinsics::simd::simd_cast::( + _svldff1ub_gather_u32base_offset_u32(pg.sve_into(), bases.as_signed(), offset) + .as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u32base]_offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4i16.nxv4i32" + )] + fn _svldff1uh_gather_u32base_offset_u32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast::( + _svldff1uh_gather_u32base_offset_u32(pg.sve_into(), bases.as_signed(), offset) + .as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + svldff1ub_gather_u64base_offset_u64(pg, bases, offset).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + svldff1uh_gather_u64base_offset_u64(pg, bases, offset).as_signed() +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + svldff1uw_gather_u64base_offset_u64(pg, bases, offset).as_signed() +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i8.nxv2i64" + )] + fn _svldff1ub_gather_u64base_offset_u64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i8; + } + crate::intrinsics::simd::simd_cast::( + _svldff1ub_gather_u64base_offset_u64(pg.sve_into(), bases.as_signed(), offset) + .as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i16.nxv2i64" + )] + fn _svldff1uh_gather_u64base_offset_u64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i16; + } + crate::intrinsics::simd::simd_cast::( + _svldff1uh_gather_u64base_offset_u64(pg.sve_into(), bases.as_signed(), offset) + .as_unsigned(), + ) +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i32.nxv2i64" + )] + fn _svldff1uw_gather_u64base_offset_u64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i32; + } + crate::intrinsics::simd::simd_cast::( + _svldff1uw_gather_u64base_offset_u64(pg.sve_into(), bases.as_signed(), offset) + .as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u32base]_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svldff1ub_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u32base]_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svldff1uh_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u32base]_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svldff1ub_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u32base]_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svldff1uh_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldff1ub_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldff1uh_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldff1uw_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldff1ub_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldff1uh_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldff1uw_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_s16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_s16(pg: svbool_t, base: *const u8) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv8i8")] + fn _svldff1ub_s16(pg: svbool8_t, base: *const i8) -> nxv8i8; + } + crate::intrinsics::simd::simd_cast::( + _svldff1ub_s16(pg.sve_into(), base.as_signed()).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_s32(pg: svbool_t, base: *const u8) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv4i8")] + fn _svldff1ub_s32(pg: svbool4_t, base: *const i8) -> nxv4i8; + } + crate::intrinsics::simd::simd_cast::( + _svldff1ub_s32(pg.sve_into(), base.as_signed()).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_s32(pg: svbool_t, base: *const u16) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv4i16")] + fn _svldff1uh_s32(pg: svbool4_t, base: *const i16) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast::( + _svldff1uh_s32(pg.sve_into(), base.as_signed()).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_s64(pg: svbool_t, base: *const u8) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2i8")] + fn _svldff1ub_s64(pg: svbool2_t, base: *const i8) -> nxv2i8; + } + crate::intrinsics::simd::simd_cast::( + _svldff1ub_s64(pg.sve_into(), base.as_signed()).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_s64(pg: svbool_t, base: *const u16) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2i16")] + fn _svldff1uh_s64(pg: svbool2_t, base: *const i16) -> nxv2i16; + } + crate::intrinsics::simd::simd_cast::( + _svldff1uh_s64(pg.sve_into(), base.as_signed()).as_unsigned(), + ) +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_s64(pg: svbool_t, base: *const u32) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2i32")] + fn _svldff1uw_s64(pg: svbool2_t, base: *const i32) -> nxv2i32; + } + crate::intrinsics::simd::simd_cast::( + _svldff1uw_s64(pg.sve_into(), base.as_signed()).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_u16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_u16(pg: svbool_t, base: *const u8) -> svuint16_t { + svldff1ub_s16(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_u32(pg: svbool_t, base: *const u8) -> svuint32_t { + svldff1ub_s32(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_u32(pg: svbool_t, base: *const u16) -> svuint32_t { + svldff1uh_s32(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_u64(pg: svbool_t, base: *const u8) -> svuint64_t { + svldff1ub_s64(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_u64(pg: svbool_t, base: *const u16) -> svuint64_t { + svldff1uh_s64(pg, base).as_unsigned() +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_u64(pg: svbool_t, base: *const u32) -> svuint64_t { + svldff1uw_s64(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_vnum_s16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_vnum_s16(pg: svbool_t, base: *const u8, vnum: i64) -> svint16_t { + svldff1ub_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_vnum_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_vnum_s32(pg: svbool_t, base: *const u8, vnum: i64) -> svint32_t { + svldff1ub_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_vnum_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_vnum_s32(pg: svbool_t, base: *const u16, vnum: i64) -> svint32_t { + svldff1uh_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_vnum_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_vnum_s64(pg: svbool_t, base: *const u8, vnum: i64) -> svint64_t { + svldff1ub_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_vnum_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_vnum_s64(pg: svbool_t, base: *const u16, vnum: i64) -> svint64_t { + svldff1uh_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_vnum_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_vnum_s64(pg: svbool_t, base: *const u32, vnum: i64) -> svint64_t { + svldff1uw_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_vnum_u16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_vnum_u16(pg: svbool_t, base: *const u8, vnum: i64) -> svuint16_t { + svldff1ub_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_vnum_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_vnum_u32(pg: svbool_t, base: *const u8, vnum: i64) -> svuint32_t { + svldff1ub_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_vnum_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_vnum_u32(pg: svbool_t, base: *const u16, vnum: i64) -> svuint32_t { + svldff1uh_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_vnum_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1b))] +pub unsafe fn svldff1ub_vnum_u64(pg: svbool_t, base: *const u8, vnum: i64) -> svuint64_t { + svldff1ub_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_vnum_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_vnum_u64(pg: svbool_t, base: *const u16, vnum: i64) -> svuint64_t { + svldff1uh_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_vnum_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_vnum_u64(pg: svbool_t, base: *const u32, vnum: i64) -> svuint64_t { + svldff1uw_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s32]index_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_s32index_s32( + pg: svbool_t, + base: *const u16, + indices: svint32_t, +) -> svint32_t { + svldff1uh_gather_s32index_u32(pg, base, indices).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s32]index_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_s32index_u32( + pg: svbool_t, + base: *const u16, + indices: svint32_t, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv4i16" + )] + fn _svldff1uh_gather_s32index_u32( + pg: svbool4_t, + base: *const i16, + indices: svint32_t, + ) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast::( + _svldff1uh_gather_s32index_u32(pg.sve_into(), base.as_signed(), indices).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_s64index_s64( + pg: svbool_t, + base: *const u16, + indices: svint64_t, +) -> svint64_t { + svldff1uh_gather_s64index_u64(pg, base, indices).as_signed() +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[s64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_s64index_s64( + pg: svbool_t, + base: *const u32, + indices: svint64_t, +) -> svint64_t { + svldff1uw_gather_s64index_u64(pg, base, indices).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_s64index_u64( + pg: svbool_t, + base: *const u16, + indices: svint64_t, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.index.nxv2i16" + )] + fn _svldff1uh_gather_s64index_u64( + pg: svbool2_t, + base: *const i16, + indices: svint64_t, + ) -> nxv2i16; + } + crate::intrinsics::simd::simd_cast::( + _svldff1uh_gather_s64index_u64(pg.sve_into(), base.as_signed(), indices).as_unsigned(), + ) +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[s64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_s64index_u64( + pg: svbool_t, + base: *const u32, + indices: svint64_t, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.index.nxv2i32" + )] + fn _svldff1uw_gather_s64index_u64( + pg: svbool2_t, + base: *const i32, + indices: svint64_t, + ) -> nxv2i32; + } + crate::intrinsics::simd::simd_cast::( + _svldff1uw_gather_s64index_u64(pg.sve_into(), base.as_signed(), indices).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u32]index_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u32index_s32( + pg: svbool_t, + base: *const u16, + indices: svuint32_t, +) -> svint32_t { + svldff1uh_gather_u32index_u32(pg, base, indices).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u32]index_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u32index_u32( + pg: svbool_t, + base: *const u16, + indices: svuint32_t, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv4i16" + )] + fn _svldff1uh_gather_u32index_u32( + pg: svbool4_t, + base: *const i16, + indices: svint32_t, + ) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast::( + _svldff1uh_gather_u32index_u32(pg.sve_into(), base.as_signed(), indices.as_signed()) + .as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u64index_s64( + pg: svbool_t, + base: *const u16, + indices: svuint64_t, +) -> svint64_t { + svldff1uh_gather_s64index_u64(pg, base, indices.as_signed()).as_signed() +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[u64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_u64index_s64( + pg: svbool_t, + base: *const u32, + indices: svuint64_t, +) -> svint64_t { + svldff1uw_gather_s64index_u64(pg, base, indices.as_signed()).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u64index_u64( + pg: svbool_t, + base: *const u16, + indices: svuint64_t, +) -> svuint64_t { + svldff1uh_gather_s64index_u64(pg, base, indices.as_signed()) +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[u64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_u64index_u64( + pg: svbool_t, + base: *const u32, + indices: svuint64_t, +) -> svuint64_t { + svldff1uw_gather_s64index_u64(pg, base, indices.as_signed()) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u32base]_index_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svint32_t { + svldff1uh_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u32base]_index_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svuint32_t { + svldff1uh_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u64base]_index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svldff1uh_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather[_u64base]_index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svldff1uw_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Load 16-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u64base]_index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1h))] +pub unsafe fn svldff1uh_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svldff1uh_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and zero-extend, first-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather[_u64base]_index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldff1w))] +pub unsafe fn svldff1uw_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svldff1uw_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1w))] +pub unsafe fn svldnf1_f32(pg: svbool_t, base: *const f32) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv4f32")] + fn _svldnf1_f32(pg: svbool4_t, base: *const f32) -> svfloat32_t; + } + _svldnf1_f32(pg.sve_into(), base) +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1d))] +pub unsafe fn svldnf1_f64(pg: svbool_t, base: *const f64) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2f64")] + fn _svldnf1_f64(pg: svbool2_t, base: *const f64) -> svfloat64_t; + } + _svldnf1_f64(pg.sve_into(), base) +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1_s8(pg: svbool_t, base: *const i8) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv16i8")] + fn _svldnf1_s8(pg: svbool_t, base: *const i8) -> svint8_t; + } + _svldnf1_s8(pg, base) +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1_s16(pg: svbool_t, base: *const i16) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv8i16")] + fn _svldnf1_s16(pg: svbool8_t, base: *const i16) -> svint16_t; + } + _svldnf1_s16(pg.sve_into(), base) +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1w))] +pub unsafe fn svldnf1_s32(pg: svbool_t, base: *const i32) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv4i32")] + fn _svldnf1_s32(pg: svbool4_t, base: *const i32) -> svint32_t; + } + _svldnf1_s32(pg.sve_into(), base) +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1d))] +pub unsafe fn svldnf1_s64(pg: svbool_t, base: *const i64) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2i64")] + fn _svldnf1_s64(pg: svbool2_t, base: *const i64) -> svint64_t; + } + _svldnf1_s64(pg.sve_into(), base) +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1_u8(pg: svbool_t, base: *const u8) -> svuint8_t { + svldnf1_s8(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1_u16(pg: svbool_t, base: *const u16) -> svuint16_t { + svldnf1_s16(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1w))] +pub unsafe fn svldnf1_u32(pg: svbool_t, base: *const u32) -> svuint32_t { + svldnf1_s32(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1d))] +pub unsafe fn svldnf1_u64(pg: svbool_t, base: *const u64) -> svuint64_t { + svldnf1_s64(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1w))] +pub unsafe fn svldnf1_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svfloat32_t { + svldnf1_f32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1d))] +pub unsafe fn svldnf1_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svfloat64_t { + svldnf1_f64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint8_t { + svldnf1_s8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svint16_t { + svldnf1_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1w))] +pub unsafe fn svldnf1_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svint32_t { + svldnf1_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1d))] +pub unsafe fn svldnf1_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svint64_t { + svldnf1_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuint8_t { + svldnf1_u8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svuint16_t { + svldnf1_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1w))] +pub unsafe fn svldnf1_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svuint32_t { + svldnf1_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1d))] +pub unsafe fn svldnf1_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> svuint64_t { + svldnf1_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_s16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_s16(pg: svbool_t, base: *const i8) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv8i8")] + fn _svldnf1sb_s16(pg: svbool8_t, base: *const i8) -> nxv8i8; + } + crate::intrinsics::simd::simd_cast(_svldnf1sb_s16(pg.sve_into(), base)) +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_s32(pg: svbool_t, base: *const i8) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv4i8")] + fn _svldnf1sb_s32(pg: svbool4_t, base: *const i8) -> nxv4i8; + } + crate::intrinsics::simd::simd_cast(_svldnf1sb_s32(pg.sve_into(), base)) +} +#[doc = "Load 16-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sh))] +pub unsafe fn svldnf1sh_s32(pg: svbool_t, base: *const i16) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv4i16")] + fn _svldnf1sh_s32(pg: svbool4_t, base: *const i16) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast(_svldnf1sh_s32(pg.sve_into(), base)) +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_s64(pg: svbool_t, base: *const i8) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2i8")] + fn _svldnf1sb_s64(pg: svbool2_t, base: *const i8) -> nxv2i8; + } + crate::intrinsics::simd::simd_cast(_svldnf1sb_s64(pg.sve_into(), base)) +} +#[doc = "Load 16-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sh))] +pub unsafe fn svldnf1sh_s64(pg: svbool_t, base: *const i16) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2i16")] + fn _svldnf1sh_s64(pg: svbool2_t, base: *const i16) -> nxv2i16; + } + crate::intrinsics::simd::simd_cast(_svldnf1sh_s64(pg.sve_into(), base)) +} +#[doc = "Load 32-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sw_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sw))] +pub unsafe fn svldnf1sw_s64(pg: svbool_t, base: *const i32) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2i32")] + fn _svldnf1sw_s64(pg: svbool2_t, base: *const i32) -> nxv2i32; + } + crate::intrinsics::simd::simd_cast(_svldnf1sw_s64(pg.sve_into(), base)) +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_u16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_u16(pg: svbool_t, base: *const i8) -> svuint16_t { + svldnf1sb_s16(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_u32(pg: svbool_t, base: *const i8) -> svuint32_t { + svldnf1sb_s32(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sh))] +pub unsafe fn svldnf1sh_u32(pg: svbool_t, base: *const i16) -> svuint32_t { + svldnf1sh_s32(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_u64(pg: svbool_t, base: *const i8) -> svuint64_t { + svldnf1sb_s64(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sh))] +pub unsafe fn svldnf1sh_u64(pg: svbool_t, base: *const i16) -> svuint64_t { + svldnf1sh_s64(pg, base).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sw_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sw))] +pub unsafe fn svldnf1sw_u64(pg: svbool_t, base: *const i32) -> svuint64_t { + svldnf1sw_s64(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_vnum_s16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_vnum_s16(pg: svbool_t, base: *const i8, vnum: i64) -> svint16_t { + svldnf1sb_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_vnum_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_vnum_s32(pg: svbool_t, base: *const i8, vnum: i64) -> svint32_t { + svldnf1sb_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_vnum_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sh))] +pub unsafe fn svldnf1sh_vnum_s32(pg: svbool_t, base: *const i16, vnum: i64) -> svint32_t { + svldnf1sh_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_vnum_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_vnum_s64(pg: svbool_t, base: *const i8, vnum: i64) -> svint64_t { + svldnf1sb_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_vnum_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sh))] +pub unsafe fn svldnf1sh_vnum_s64(pg: svbool_t, base: *const i16, vnum: i64) -> svint64_t { + svldnf1sh_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sw_vnum_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sw))] +pub unsafe fn svldnf1sw_vnum_s64(pg: svbool_t, base: *const i32, vnum: i64) -> svint64_t { + svldnf1sw_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_vnum_u16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_vnum_u16(pg: svbool_t, base: *const i8, vnum: i64) -> svuint16_t { + svldnf1sb_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_vnum_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_vnum_u32(pg: svbool_t, base: *const i8, vnum: i64) -> svuint32_t { + svldnf1sb_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_vnum_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sh))] +pub unsafe fn svldnf1sh_vnum_u32(pg: svbool_t, base: *const i16, vnum: i64) -> svuint32_t { + svldnf1sh_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_vnum_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sb))] +pub unsafe fn svldnf1sb_vnum_u64(pg: svbool_t, base: *const i8, vnum: i64) -> svuint64_t { + svldnf1sb_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_vnum_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sh))] +pub unsafe fn svldnf1sh_vnum_u64(pg: svbool_t, base: *const i16, vnum: i64) -> svuint64_t { + svldnf1sh_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and sign-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sw_vnum_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1sw))] +pub unsafe fn svldnf1sw_vnum_u64(pg: svbool_t, base: *const i32, vnum: i64) -> svuint64_t { + svldnf1sw_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_s16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_s16(pg: svbool_t, base: *const u8) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv8i8")] + fn _svldnf1ub_s16(pg: svbool8_t, base: *const i8) -> nxv8i8; + } + crate::intrinsics::simd::simd_cast::( + _svldnf1ub_s16(pg.sve_into(), base.as_signed()).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_s32(pg: svbool_t, base: *const u8) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv4i8")] + fn _svldnf1ub_s32(pg: svbool4_t, base: *const i8) -> nxv4i8; + } + crate::intrinsics::simd::simd_cast::( + _svldnf1ub_s32(pg.sve_into(), base.as_signed()).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1uh_s32(pg: svbool_t, base: *const u16) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv4i16")] + fn _svldnf1uh_s32(pg: svbool4_t, base: *const i16) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast::( + _svldnf1uh_s32(pg.sve_into(), base.as_signed()).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_s64(pg: svbool_t, base: *const u8) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2i8")] + fn _svldnf1ub_s64(pg: svbool2_t, base: *const i8) -> nxv2i8; + } + crate::intrinsics::simd::simd_cast::( + _svldnf1ub_s64(pg.sve_into(), base.as_signed()).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1uh_s64(pg: svbool_t, base: *const u16) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2i16")] + fn _svldnf1uh_s64(pg: svbool2_t, base: *const i16) -> nxv2i16; + } + crate::intrinsics::simd::simd_cast::( + _svldnf1uh_s64(pg.sve_into(), base.as_signed()).as_unsigned(), + ) +} +#[doc = "Load 32-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uw_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1w))] +pub unsafe fn svldnf1uw_s64(pg: svbool_t, base: *const u32) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2i32")] + fn _svldnf1uw_s64(pg: svbool2_t, base: *const i32) -> nxv2i32; + } + crate::intrinsics::simd::simd_cast::( + _svldnf1uw_s64(pg.sve_into(), base.as_signed()).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_u16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_u16(pg: svbool_t, base: *const u8) -> svuint16_t { + svldnf1ub_s16(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_u32(pg: svbool_t, base: *const u8) -> svuint32_t { + svldnf1ub_s32(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1uh_u32(pg: svbool_t, base: *const u16) -> svuint32_t { + svldnf1uh_s32(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_u64(pg: svbool_t, base: *const u8) -> svuint64_t { + svldnf1ub_s64(pg, base).as_unsigned() +} +#[doc = "Load 16-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1uh_u64(pg: svbool_t, base: *const u16) -> svuint64_t { + svldnf1uh_s64(pg, base).as_unsigned() +} +#[doc = "Load 32-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uw_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1w))] +pub unsafe fn svldnf1uw_u64(pg: svbool_t, base: *const u32) -> svuint64_t { + svldnf1uw_s64(pg, base).as_unsigned() +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_vnum_s16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_vnum_s16(pg: svbool_t, base: *const u8, vnum: i64) -> svint16_t { + svldnf1ub_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_vnum_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_vnum_s32(pg: svbool_t, base: *const u8, vnum: i64) -> svint32_t { + svldnf1ub_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_vnum_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1uh_vnum_s32(pg: svbool_t, base: *const u16, vnum: i64) -> svint32_t { + svldnf1uh_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_vnum_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_vnum_s64(pg: svbool_t, base: *const u8, vnum: i64) -> svint64_t { + svldnf1ub_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_vnum_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1uh_vnum_s64(pg: svbool_t, base: *const u16, vnum: i64) -> svint64_t { + svldnf1uh_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uw_vnum_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1w))] +pub unsafe fn svldnf1uw_vnum_s64(pg: svbool_t, base: *const u32, vnum: i64) -> svint64_t { + svldnf1uw_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_vnum_u16)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_vnum_u16(pg: svbool_t, base: *const u8, vnum: i64) -> svuint16_t { + svldnf1ub_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_vnum_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_vnum_u32(pg: svbool_t, base: *const u8, vnum: i64) -> svuint32_t { + svldnf1ub_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_vnum_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1uh_vnum_u32(pg: svbool_t, base: *const u16, vnum: i64) -> svuint32_t { + svldnf1uh_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Load 8-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_vnum_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1b))] +pub unsafe fn svldnf1ub_vnum_u64(pg: svbool_t, base: *const u8, vnum: i64) -> svuint64_t { + svldnf1ub_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 16-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_vnum_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1h))] +pub unsafe fn svldnf1uh_vnum_u64(pg: svbool_t, base: *const u16, vnum: i64) -> svuint64_t { + svldnf1uh_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Load 32-bit data and zero-extend, non-faulting"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uw_vnum_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] +#[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnf1w))] +pub unsafe fn svldnf1uw_vnum_u64(pg: svbool_t, base: *const u32, vnum: i64) -> svuint64_t { + svldnf1uw_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_f32(pg: svbool_t, base: *const f32) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnt1.nxv4f32")] + fn _svldnt1_f32(pg: svbool4_t, base: *const f32) -> svfloat32_t; + } + _svldnt1_f32(pg.sve_into(), base) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_f64(pg: svbool_t, base: *const f64) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnt1.nxv2f64")] + fn _svldnt1_f64(pg: svbool2_t, base: *const f64) -> svfloat64_t; + } + _svldnt1_f64(pg.sve_into(), base) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1_s8(pg: svbool_t, base: *const i8) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnt1.nxv16i8")] + fn _svldnt1_s8(pg: svbool_t, base: *const i8) -> svint8_t; + } + _svldnt1_s8(pg, base) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1_s16(pg: svbool_t, base: *const i16) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnt1.nxv8i16")] + fn _svldnt1_s16(pg: svbool8_t, base: *const i16) -> svint16_t; + } + _svldnt1_s16(pg.sve_into(), base) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_s32(pg: svbool_t, base: *const i32) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnt1.nxv4i32")] + fn _svldnt1_s32(pg: svbool4_t, base: *const i32) -> svint32_t; + } + _svldnt1_s32(pg.sve_into(), base) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_s64(pg: svbool_t, base: *const i64) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnt1.nxv2i64")] + fn _svldnt1_s64(pg: svbool2_t, base: *const i64) -> svint64_t; + } + _svldnt1_s64(pg.sve_into(), base) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1_u8(pg: svbool_t, base: *const u8) -> svuint8_t { + svldnt1_s8(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1_u16(pg: svbool_t, base: *const u16) -> svuint16_t { + svldnt1_s16(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_u32(pg: svbool_t, base: *const u32) -> svuint32_t { + svldnt1_s32(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_u64(pg: svbool_t, base: *const u64) -> svuint64_t { + svldnt1_s64(pg, base.as_signed()).as_unsigned() +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svfloat32_t { + svldnt1_f32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svfloat64_t { + svldnt1_f64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint8_t { + svldnt1_s8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svint16_t { + svldnt1_s16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svint32_t { + svldnt1_s32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svint64_t { + svldnt1_s64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuint8_t { + svldnt1_u8(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svuint16_t { + svldnt1_u16(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svuint32_t { + svldnt1_u32(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> svuint64_t { + svldnt1_u64(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Count the number of elements in a full vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cntw))] +pub fn svlen_f32(_op: svfloat32_t) -> u64 { + svcntw() +} +#[doc = "Count the number of elements in a full vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cntd))] +pub fn svlen_f64(_op: svfloat64_t) -> u64 { + svcntd() +} +#[doc = "Count the number of elements in a full vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rdvl))] +pub fn svlen_s8(_op: svint8_t) -> u64 { + svcntb() +} +#[doc = "Count the number of elements in a full vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnth))] +pub fn svlen_s16(_op: svint16_t) -> u64 { + svcnth() +} +#[doc = "Count the number of elements in a full vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cntw))] +pub fn svlen_s32(_op: svint32_t) -> u64 { + svcntw() +} +#[doc = "Count the number of elements in a full vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cntd))] +pub fn svlen_s64(_op: svint64_t) -> u64 { + svcntd() +} +#[doc = "Count the number of elements in a full vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rdvl))] +pub fn svlen_u8(_op: svuint8_t) -> u64 { + svcntb() +} +#[doc = "Count the number of elements in a full vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cnth))] +pub fn svlen_u16(_op: svuint16_t) -> u64 { + svcnth() +} +#[doc = "Count the number of elements in a full vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cntw))] +pub fn svlen_u32(_op: svuint32_t) -> u64 { + svcntw() +} +#[doc = "Count the number of elements in a full vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cntd))] +pub fn svlen_u64(_op: svuint64_t) -> u64 { + svcntd() +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s8_m(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsl.nxv16i8")] + fn _svlsl_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svlsl_s8_m(pg, op1, op2.as_signed()) } +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s8_m(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t { + svlsl_s8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s8_x(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t { + svlsl_s8_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s8_x(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t { + svlsl_s8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s8_z(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t { + svlsl_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s8_z(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t { + svlsl_s8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s16_m(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsl.nxv8i16")] + fn _svlsl_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svlsl_s16_m(pg.sve_into(), op1, op2.as_signed()) } +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s16_m(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t { + svlsl_s16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s16_x(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t { + svlsl_s16_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s16_x(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t { + svlsl_s16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s16_z(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t { + svlsl_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s16_z(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t { + svlsl_s16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s32_m(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsl.nxv4i32")] + fn _svlsl_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svlsl_s32_m(pg.sve_into(), op1, op2.as_signed()) } +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s32_m(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t { + svlsl_s32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s32_x(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t { + svlsl_s32_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s32_x(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t { + svlsl_s32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s32_z(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t { + svlsl_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s32_z(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t { + svlsl_s32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s64_m(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsl.nxv2i64")] + fn _svlsl_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svlsl_s64_m(pg.sve_into(), op1, op2.as_signed()) } +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s64_m(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t { + svlsl_s64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s64_x(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t { + svlsl_s64_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s64_x(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t { + svlsl_s64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_s64_z(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t { + svlsl_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_s64_z(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t { + svlsl_s64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svlsl_s8_m(pg, op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svlsl_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svlsl_u8_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svlsl_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svlsl_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svlsl_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svlsl_s16_m(pg, op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svlsl_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svlsl_u16_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svlsl_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svlsl_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svlsl_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svlsl_s32_m(pg, op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svlsl_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svlsl_u32_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svlsl_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svlsl_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svlsl_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svlsl_s64_m(pg, op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svlsl_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svlsl_u64_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svlsl_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svlsl_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svlsl_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_s8_m(pg: svbool_t, op1: svint8_t, op2: svuint64_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.lsl.wide.nxv16i8" + )] + fn _svlsl_wide_s8_m(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svint8_t; + } + unsafe { _svlsl_wide_s8_m(pg, op1, op2.as_signed()) } +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_s8_m(pg: svbool_t, op1: svint8_t, op2: u64) -> svint8_t { + svlsl_wide_s8_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_s8_x(pg: svbool_t, op1: svint8_t, op2: svuint64_t) -> svint8_t { + svlsl_wide_s8_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_s8_x(pg: svbool_t, op1: svint8_t, op2: u64) -> svint8_t { + svlsl_wide_s8_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_s8_z(pg: svbool_t, op1: svint8_t, op2: svuint64_t) -> svint8_t { + svlsl_wide_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_s8_z(pg: svbool_t, op1: svint8_t, op2: u64) -> svint8_t { + svlsl_wide_s8_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_s16_m(pg: svbool_t, op1: svint16_t, op2: svuint64_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.lsl.wide.nxv8i16" + )] + fn _svlsl_wide_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svint16_t; + } + unsafe { _svlsl_wide_s16_m(pg.sve_into(), op1, op2.as_signed()) } +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_s16_m(pg: svbool_t, op1: svint16_t, op2: u64) -> svint16_t { + svlsl_wide_s16_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_s16_x(pg: svbool_t, op1: svint16_t, op2: svuint64_t) -> svint16_t { + svlsl_wide_s16_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_s16_x(pg: svbool_t, op1: svint16_t, op2: u64) -> svint16_t { + svlsl_wide_s16_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_s16_z(pg: svbool_t, op1: svint16_t, op2: svuint64_t) -> svint16_t { + svlsl_wide_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_s16_z(pg: svbool_t, op1: svint16_t, op2: u64) -> svint16_t { + svlsl_wide_s16_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_s32_m(pg: svbool_t, op1: svint32_t, op2: svuint64_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.lsl.wide.nxv4i32" + )] + fn _svlsl_wide_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svint32_t; + } + unsafe { _svlsl_wide_s32_m(pg.sve_into(), op1, op2.as_signed()) } +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_s32_m(pg: svbool_t, op1: svint32_t, op2: u64) -> svint32_t { + svlsl_wide_s32_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_s32_x(pg: svbool_t, op1: svint32_t, op2: svuint64_t) -> svint32_t { + svlsl_wide_s32_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_s32_x(pg: svbool_t, op1: svint32_t, op2: u64) -> svint32_t { + svlsl_wide_s32_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_s32_z(pg: svbool_t, op1: svint32_t, op2: svuint64_t) -> svint32_t { + svlsl_wide_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_s32_z(pg: svbool_t, op1: svint32_t, op2: u64) -> svint32_t { + svlsl_wide_s32_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svuint8_t { + unsafe { svlsl_wide_s8_m(pg, op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u64) -> svuint8_t { + svlsl_wide_u8_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svuint8_t { + svlsl_wide_u8_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u64) -> svuint8_t { + svlsl_wide_u8_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svuint8_t { + svlsl_wide_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u64) -> svuint8_t { + svlsl_wide_u8_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svuint16_t { + unsafe { svlsl_wide_s16_m(pg, op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u64) -> svuint16_t { + svlsl_wide_u16_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svuint16_t { + svlsl_wide_u16_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u64) -> svuint16_t { + svlsl_wide_u16_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svuint16_t { + svlsl_wide_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u64) -> svuint16_t { + svlsl_wide_u16_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svuint32_t { + unsafe { svlsl_wide_s32_m(pg, op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u64) -> svuint32_t { + svlsl_wide_u32_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svuint32_t { + svlsl_wide_u32_m(pg, op1, op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u64) -> svuint32_t { + svlsl_wide_u32_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svuint32_t { + svlsl_wide_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Logical shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsl))] +pub fn svlsl_wide_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u64) -> svuint32_t { + svlsl_wide_u32_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsr.nxv16i8")] + fn _svlsr_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svlsr_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svlsr_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svlsr_u8_m(pg, op1, op2) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svlsr_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svlsr_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svlsr_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsr.nxv8i16")] + fn _svlsr_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svlsr_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svlsr_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svlsr_u16_m(pg, op1, op2) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svlsr_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svlsr_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svlsr_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsr.nxv4i32")] + fn _svlsr_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svlsr_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svlsr_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svlsr_u32_m(pg, op1, op2) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svlsr_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svlsr_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svlsr_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsr.nxv2i64")] + fn _svlsr_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svlsr_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svlsr_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svlsr_u64_m(pg, op1, op2) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svlsr_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svlsr_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svlsr_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.lsr.wide.nxv16i8" + )] + fn _svlsr_wide_u8_m(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svint8_t; + } + unsafe { _svlsr_wide_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u64) -> svuint8_t { + svlsr_wide_u8_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svuint8_t { + svlsr_wide_u8_m(pg, op1, op2) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u64) -> svuint8_t { + svlsr_wide_u8_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svuint8_t { + svlsr_wide_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u64) -> svuint8_t { + svlsr_wide_u8_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.lsr.wide.nxv8i16" + )] + fn _svlsr_wide_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svint16_t; + } + unsafe { _svlsr_wide_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u64) -> svuint16_t { + svlsr_wide_u16_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svuint16_t { + svlsr_wide_u16_m(pg, op1, op2) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u64) -> svuint16_t { + svlsr_wide_u16_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svuint16_t { + svlsr_wide_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u64) -> svuint16_t { + svlsr_wide_u16_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.lsr.wide.nxv4i32" + )] + fn _svlsr_wide_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svint32_t; + } + unsafe { _svlsr_wide_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u64) -> svuint32_t { + svlsr_wide_u32_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svuint32_t { + svlsr_wide_u32_m(pg, op1, op2) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u64) -> svuint32_t { + svlsr_wide_u32_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svuint32_t { + svlsr_wide_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Logical shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(lsr))] +pub fn svlsr_wide_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u64) -> svuint32_t { + svlsr_wide_u32_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_f32_m( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmad.nxv4f32")] + fn _svmad_f32_m( + pg: svbool4_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, + ) -> svfloat32_t; + } + unsafe { _svmad_f32_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svmad_f32_m(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_f32_x( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svmad_f32_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svmad_f32_x(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_f32_z( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svmad_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svmad_f32_z(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_f64_m( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmad.nxv2f64")] + fn _svmad_f64_m( + pg: svbool2_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, + ) -> svfloat64_t; + } + unsafe { _svmad_f64_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svmad_f64_m(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_f64_x( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svmad_f64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svmad_f64_x(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_f64_z( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svmad_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmad))] +pub fn svmad_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svmad_f64_z(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mad.nxv16i8")] + fn _svmad_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _svmad_s8_m(pg, op1, op2, op3) } +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svmad_s8_m(pg, op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + svmad_s8_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svmad_s8_x(pg, op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + svmad_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svmad_s8_z(pg, op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mad.nxv8i16")] + fn _svmad_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) + -> svint16_t; + } + unsafe { _svmad_s16_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svmad_s16_m(pg, op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + svmad_s16_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svmad_s16_x(pg, op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + svmad_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svmad_s16_z(pg, op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mad.nxv4i32")] + fn _svmad_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) + -> svint32_t; + } + unsafe { _svmad_s32_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svmad_s32_m(pg, op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + svmad_s32_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svmad_s32_x(pg, op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + svmad_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svmad_s32_z(pg, op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mad.nxv2i64")] + fn _svmad_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) + -> svint64_t; + } + unsafe { _svmad_s64_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svmad_s64_m(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + svmad_s64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svmad_s64_x(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + svmad_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svmad_s64_z(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + unsafe { svmad_s8_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svmad_u8_m(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + svmad_u8_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svmad_u8_x(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + svmad_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svmad_u8_z(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + unsafe { svmad_s16_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmad_u16_m(pg, op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + svmad_u16_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmad_u16_x(pg, op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + svmad_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmad_u16_z(pg, op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe { svmad_s32_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmad_u32_m(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + svmad_u32_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmad_u32_x(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + svmad_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmad_u32_z(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe { svmad_s64_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svmad_u64_m(pg, op1, op2, svdup_n_u64(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + svmad_u64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svmad_u64_x(pg, op1, op2, svdup_n_u64(op3)) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + svmad_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2, op3) +} +#[doc = "Multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mad))] +pub fn svmad_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svmad_u64_z(pg, op1, op2, svdup_n_u64(op3)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmax.nxv4f32")] + fn _svmax_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svmax_f32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmax_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmax_f32_m(pg, op1, op2) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmax_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmax_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmax_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmax.nxv2f64")] + fn _svmax_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svmax_f64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmax_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmax_f64_m(pg, op1, op2) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmax_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmax_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmax))] +pub fn svmax_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmax_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smax.nxv16i8")] + fn _svmax_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svmax_s8_m(pg, op1, op2) } +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmax_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svmax_s8_m(pg, op1, op2) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmax_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svmax_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmax_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smax.nxv8i16")] + fn _svmax_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svmax_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmax_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svmax_s16_m(pg, op1, op2) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmax_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svmax_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmax_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smax.nxv4i32")] + fn _svmax_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svmax_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmax_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svmax_s32_m(pg, op1, op2) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmax_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svmax_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmax_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smax.nxv2i64")] + fn _svmax_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svmax_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmax_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svmax_s64_m(pg, op1, op2) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmax_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svmax_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smax))] +pub fn svmax_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmax_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umax.nxv16i8")] + fn _svmax_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svmax_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmax_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svmax_u8_m(pg, op1, op2) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmax_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svmax_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmax_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umax.nxv8i16")] + fn _svmax_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svmax_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmax_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svmax_u16_m(pg, op1, op2) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmax_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svmax_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmax_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umax.nxv4i32")] + fn _svmax_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svmax_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmax_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svmax_u32_m(pg, op1, op2) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmax_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svmax_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmax_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umax.nxv2i64")] + fn _svmax_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svmax_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmax_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svmax_u64_m(pg, op1, op2) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmax_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svmax_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Maximum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umax))] +pub fn svmax_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmax_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Maximum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub fn svmaxnm_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmaxnm.nxv4f32")] + fn _svmaxnm_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svmaxnm_f32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Maximum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_n_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub fn svmaxnm_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmaxnm_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Maximum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub fn svmaxnm_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmaxnm_f32_m(pg, op1, op2) +} +#[doc = "Maximum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_n_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub fn svmaxnm_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmaxnm_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Maximum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub fn svmaxnm_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmaxnm_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Maximum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_n_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub fn svmaxnm_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmaxnm_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Maximum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub fn svmaxnm_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmaxnm.nxv2f64")] + fn _svmaxnm_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svmaxnm_f64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Maximum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_n_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub fn svmaxnm_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmaxnm_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Maximum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub fn svmaxnm_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmaxnm_f64_m(pg, op1, op2) +} +#[doc = "Maximum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_n_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub fn svmaxnm_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmaxnm_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Maximum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub fn svmaxnm_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmaxnm_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Maximum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_n_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxnm))] +pub fn svmaxnm_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmaxnm_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Maximum number reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnmv[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxnmv))] +pub fn svmaxnmv_f32(pg: svbool_t, op: svfloat32_t) -> f32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fmaxnmv.nxv4f32" + )] + fn _svmaxnmv_f32(pg: svbool4_t, op: svfloat32_t) -> f32; + } + unsafe { _svmaxnmv_f32(pg.sve_into(), op) } +} +#[doc = "Maximum number reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnmv[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxnmv))] +pub fn svmaxnmv_f64(pg: svbool_t, op: svfloat64_t) -> f64 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fmaxnmv.nxv2f64" + )] + fn _svmaxnmv_f64(pg: svbool2_t, op: svfloat64_t) -> f64; + } + unsafe { _svmaxnmv_f64(pg.sve_into(), op) } +} +#[doc = "Maximum reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxv))] +pub fn svmaxv_f32(pg: svbool_t, op: svfloat32_t) -> f32 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmaxv.nxv4f32")] + fn _svmaxv_f32(pg: svbool4_t, op: svfloat32_t) -> f32; + } + unsafe { _svmaxv_f32(pg.sve_into(), op) } +} +#[doc = "Maximum reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxv))] +pub fn svmaxv_f64(pg: svbool_t, op: svfloat64_t) -> f64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmaxv.nxv2f64")] + fn _svmaxv_f64(pg: svbool2_t, op: svfloat64_t) -> f64; + } + unsafe { _svmaxv_f64(pg.sve_into(), op) } +} +#[doc = "Maximum reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smaxv))] +pub fn svmaxv_s8(pg: svbool_t, op: svint8_t) -> i8 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxv.nxv16i8")] + fn _svmaxv_s8(pg: svbool_t, op: svint8_t) -> i8; + } + unsafe { _svmaxv_s8(pg, op) } +} +#[doc = "Maximum reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smaxv))] +pub fn svmaxv_s16(pg: svbool_t, op: svint16_t) -> i16 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxv.nxv8i16")] + fn _svmaxv_s16(pg: svbool8_t, op: svint16_t) -> i16; + } + unsafe { _svmaxv_s16(pg.sve_into(), op) } +} +#[doc = "Maximum reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smaxv))] +pub fn svmaxv_s32(pg: svbool_t, op: svint32_t) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxv.nxv4i32")] + fn _svmaxv_s32(pg: svbool4_t, op: svint32_t) -> i32; + } + unsafe { _svmaxv_s32(pg.sve_into(), op) } +} +#[doc = "Maximum reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smaxv))] +pub fn svmaxv_s64(pg: svbool_t, op: svint64_t) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxv.nxv2i64")] + fn _svmaxv_s64(pg: svbool2_t, op: svint64_t) -> i64; + } + unsafe { _svmaxv_s64(pg.sve_into(), op) } +} +#[doc = "Maximum reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umaxv))] +pub fn svmaxv_u8(pg: svbool_t, op: svuint8_t) -> u8 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxv.nxv16i8")] + fn _svmaxv_u8(pg: svbool_t, op: svint8_t) -> i8; + } + unsafe { _svmaxv_u8(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Maximum reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umaxv))] +pub fn svmaxv_u16(pg: svbool_t, op: svuint16_t) -> u16 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxv.nxv8i16")] + fn _svmaxv_u16(pg: svbool8_t, op: svint16_t) -> i16; + } + unsafe { _svmaxv_u16(pg.sve_into(), op.as_signed()).as_unsigned() } +} +#[doc = "Maximum reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umaxv))] +pub fn svmaxv_u32(pg: svbool_t, op: svuint32_t) -> u32 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxv.nxv4i32")] + fn _svmaxv_u32(pg: svbool4_t, op: svint32_t) -> i32; + } + unsafe { _svmaxv_u32(pg.sve_into(), op.as_signed()).as_unsigned() } +} +#[doc = "Maximum reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umaxv))] +pub fn svmaxv_u64(pg: svbool_t, op: svuint64_t) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxv.nxv2i64")] + fn _svmaxv_u64(pg: svbool2_t, op: svint64_t) -> i64; + } + unsafe { _svmaxv_u64(pg.sve_into(), op.as_signed()).as_unsigned() } +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmin.nxv4f32")] + fn _svmin_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svmin_f32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmin_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmin_f32_m(pg, op1, op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmin_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmin_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmin_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmin.nxv2f64")] + fn _svmin_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svmin_f64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmin_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmin_f64_m(pg, op1, op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmin_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmin_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmin))] +pub fn svmin_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmin_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smin.nxv16i8")] + fn _svmin_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svmin_s8_m(pg, op1, op2) } +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmin_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svmin_s8_m(pg, op1, op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmin_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svmin_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmin_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smin.nxv8i16")] + fn _svmin_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svmin_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmin_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svmin_s16_m(pg, op1, op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmin_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svmin_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmin_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smin.nxv4i32")] + fn _svmin_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svmin_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmin_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svmin_s32_m(pg, op1, op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmin_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svmin_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmin_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smin.nxv2i64")] + fn _svmin_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svmin_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmin_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svmin_s64_m(pg, op1, op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmin_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svmin_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smin))] +pub fn svmin_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmin_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umin.nxv16i8")] + fn _svmin_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svmin_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmin_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svmin_u8_m(pg, op1, op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmin_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svmin_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmin_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umin.nxv8i16")] + fn _svmin_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svmin_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmin_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svmin_u16_m(pg, op1, op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmin_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svmin_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmin_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umin.nxv4i32")] + fn _svmin_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svmin_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmin_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svmin_u32_m(pg, op1, op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmin_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svmin_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmin_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umin.nxv2i64")] + fn _svmin_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svmin_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmin_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svmin_u64_m(pg, op1, op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmin_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svmin_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Minimum"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umin))] +pub fn svmin_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmin_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Minimum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fminnm.nxv4f32")] + fn _svminnm_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svminnm_f32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Minimum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_n_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svminnm_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Minimum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svminnm_f32_m(pg, op1, op2) +} +#[doc = "Minimum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_n_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svminnm_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Minimum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svminnm_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Minimum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_n_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svminnm_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Minimum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fminnm.nxv2f64")] + fn _svminnm_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svminnm_f64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Minimum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_n_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svminnm_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Minimum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svminnm_f64_m(pg, op1, op2) +} +#[doc = "Minimum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_n_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svminnm_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Minimum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svminnm_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Minimum number"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_n_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminnm))] +pub fn svminnm_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svminnm_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Minimum number reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnmv[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminnmv))] +pub fn svminnmv_f32(pg: svbool_t, op: svfloat32_t) -> f32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fminnmv.nxv4f32" + )] + fn _svminnmv_f32(pg: svbool4_t, op: svfloat32_t) -> f32; + } + unsafe { _svminnmv_f32(pg.sve_into(), op) } +} +#[doc = "Minimum number reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnmv[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminnmv))] +pub fn svminnmv_f64(pg: svbool_t, op: svfloat64_t) -> f64 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fminnmv.nxv2f64" + )] + fn _svminnmv_f64(pg: svbool2_t, op: svfloat64_t) -> f64; + } + unsafe { _svminnmv_f64(pg.sve_into(), op) } +} +#[doc = "Minimum reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminv))] +pub fn svminv_f32(pg: svbool_t, op: svfloat32_t) -> f32 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fminv.nxv4f32")] + fn _svminv_f32(pg: svbool4_t, op: svfloat32_t) -> f32; + } + unsafe { _svminv_f32(pg.sve_into(), op) } +} +#[doc = "Minimum reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminv))] +pub fn svminv_f64(pg: svbool_t, op: svfloat64_t) -> f64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fminv.nxv2f64")] + fn _svminv_f64(pg: svbool2_t, op: svfloat64_t) -> f64; + } + unsafe { _svminv_f64(pg.sve_into(), op) } +} +#[doc = "Minimum reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sminv))] +pub fn svminv_s8(pg: svbool_t, op: svint8_t) -> i8 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminv.nxv16i8")] + fn _svminv_s8(pg: svbool_t, op: svint8_t) -> i8; + } + unsafe { _svminv_s8(pg, op) } +} +#[doc = "Minimum reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sminv))] +pub fn svminv_s16(pg: svbool_t, op: svint16_t) -> i16 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminv.nxv8i16")] + fn _svminv_s16(pg: svbool8_t, op: svint16_t) -> i16; + } + unsafe { _svminv_s16(pg.sve_into(), op) } +} +#[doc = "Minimum reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sminv))] +pub fn svminv_s32(pg: svbool_t, op: svint32_t) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminv.nxv4i32")] + fn _svminv_s32(pg: svbool4_t, op: svint32_t) -> i32; + } + unsafe { _svminv_s32(pg.sve_into(), op) } +} +#[doc = "Minimum reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sminv))] +pub fn svminv_s64(pg: svbool_t, op: svint64_t) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminv.nxv2i64")] + fn _svminv_s64(pg: svbool2_t, op: svint64_t) -> i64; + } + unsafe { _svminv_s64(pg.sve_into(), op) } +} +#[doc = "Minimum reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uminv))] +pub fn svminv_u8(pg: svbool_t, op: svuint8_t) -> u8 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminv.nxv16i8")] + fn _svminv_u8(pg: svbool_t, op: svint8_t) -> i8; + } + unsafe { _svminv_u8(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Minimum reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uminv))] +pub fn svminv_u16(pg: svbool_t, op: svuint16_t) -> u16 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminv.nxv8i16")] + fn _svminv_u16(pg: svbool8_t, op: svint16_t) -> i16; + } + unsafe { _svminv_u16(pg.sve_into(), op.as_signed()).as_unsigned() } +} +#[doc = "Minimum reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uminv))] +pub fn svminv_u32(pg: svbool_t, op: svuint32_t) -> u32 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminv.nxv4i32")] + fn _svminv_u32(pg: svbool4_t, op: svint32_t) -> i32; + } + unsafe { _svminv_u32(pg.sve_into(), op.as_signed()).as_unsigned() } +} +#[doc = "Minimum reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uminv))] +pub fn svminv_u64(pg: svbool_t, op: svuint64_t) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminv.nxv2i64")] + fn _svminv_u64(pg: svbool2_t, op: svint64_t) -> i64; + } + unsafe { _svminv_u64(pg.sve_into(), op.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmla))] +pub fn svmla_f32_m( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmla.nxv4f32")] + fn _svmla_f32_m( + pg: svbool4_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, + ) -> svfloat32_t; + } + unsafe { _svmla_f32_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmla))] +pub fn svmla_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svmla_f32_m(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmla))] +pub fn svmla_f32_x( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svmla_f32_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmla))] +pub fn svmla_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svmla_f32_x(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmla))] +pub fn svmla_f32_z( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svmla_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmla))] +pub fn svmla_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svmla_f32_z(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmla))] +pub fn svmla_f64_m( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmla.nxv2f64")] + fn _svmla_f64_m( + pg: svbool2_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, + ) -> svfloat64_t; + } + unsafe { _svmla_f64_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmla))] +pub fn svmla_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svmla_f64_m(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmla))] +pub fn svmla_f64_x( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svmla_f64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmla))] +pub fn svmla_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svmla_f64_x(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmla))] +pub fn svmla_f64_z( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svmla_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmla))] +pub fn svmla_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svmla_f64_z(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mla.nxv16i8")] + fn _svmla_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _svmla_s8_m(pg, op1, op2, op3) } +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svmla_s8_m(pg, op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + svmla_s8_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svmla_s8_x(pg, op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + svmla_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svmla_s8_z(pg, op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mla.nxv8i16")] + fn _svmla_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) + -> svint16_t; + } + unsafe { _svmla_s16_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svmla_s16_m(pg, op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + svmla_s16_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svmla_s16_x(pg, op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + svmla_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svmla_s16_z(pg, op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mla.nxv4i32")] + fn _svmla_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) + -> svint32_t; + } + unsafe { _svmla_s32_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svmla_s32_m(pg, op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + svmla_s32_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svmla_s32_x(pg, op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + svmla_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svmla_s32_z(pg, op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mla.nxv2i64")] + fn _svmla_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) + -> svint64_t; + } + unsafe { _svmla_s64_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svmla_s64_m(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + svmla_s64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svmla_s64_x(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + svmla_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svmla_s64_z(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + unsafe { svmla_s8_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svmla_u8_m(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + svmla_u8_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svmla_u8_x(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + svmla_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svmla_u8_z(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + unsafe { svmla_s16_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmla_u16_m(pg, op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + svmla_u16_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmla_u16_x(pg, op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + svmla_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmla_u16_z(pg, op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe { svmla_s32_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmla_u32_m(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + svmla_u32_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmla_u32_x(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + svmla_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmla_u32_z(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe { svmla_s64_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svmla_u64_m(pg, op1, op2, svdup_n_u64(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + svmla_u64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svmla_u64_x(pg, op1, op2, svdup_n_u64(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + svmla_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2, op3) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla))] +pub fn svmla_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svmla_u64_z(pg, op1, op2, svdup_n_u64(op3)) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmla, IMM_INDEX = 0))] +pub fn svmla_lane_f32( + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fmla.lane.nxv4f32" + )] + fn _svmla_lane_f32( + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, + IMM_INDEX: i32, + ) -> svfloat32_t; + } + unsafe { _svmla_lane_f32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmla, IMM_INDEX = 0))] +pub fn svmla_lane_f64( + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fmla.lane.nxv2f64" + )] + fn _svmla_lane_f64( + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, + IMM_INDEX: i32, + ) -> svfloat64_t; + } + unsafe { _svmla_lane_f64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmls))] +pub fn svmls_f32_m( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmls.nxv4f32")] + fn _svmls_f32_m( + pg: svbool4_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, + ) -> svfloat32_t; + } + unsafe { _svmls_f32_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmls))] +pub fn svmls_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svmls_f32_m(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmls))] +pub fn svmls_f32_x( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svmls_f32_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmls))] +pub fn svmls_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svmls_f32_x(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmls))] +pub fn svmls_f32_z( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svmls_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmls))] +pub fn svmls_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svmls_f32_z(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmls))] +pub fn svmls_f64_m( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmls.nxv2f64")] + fn _svmls_f64_m( + pg: svbool2_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, + ) -> svfloat64_t; + } + unsafe { _svmls_f64_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmls))] +pub fn svmls_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svmls_f64_m(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmls))] +pub fn svmls_f64_x( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svmls_f64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmls))] +pub fn svmls_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svmls_f64_x(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmls))] +pub fn svmls_f64_z( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svmls_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmls))] +pub fn svmls_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svmls_f64_z(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mls.nxv16i8")] + fn _svmls_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _svmls_s8_m(pg, op1, op2, op3) } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svmls_s8_m(pg, op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + svmls_s8_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svmls_s8_x(pg, op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + svmls_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svmls_s8_z(pg, op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mls.nxv8i16")] + fn _svmls_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) + -> svint16_t; + } + unsafe { _svmls_s16_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svmls_s16_m(pg, op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + svmls_s16_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svmls_s16_x(pg, op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + svmls_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svmls_s16_z(pg, op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mls.nxv4i32")] + fn _svmls_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) + -> svint32_t; + } + unsafe { _svmls_s32_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svmls_s32_m(pg, op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + svmls_s32_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svmls_s32_x(pg, op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + svmls_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svmls_s32_z(pg, op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mls.nxv2i64")] + fn _svmls_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) + -> svint64_t; + } + unsafe { _svmls_s64_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svmls_s64_m(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + svmls_s64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svmls_s64_x(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + svmls_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svmls_s64_z(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + unsafe { svmls_s8_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svmls_u8_m(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + svmls_u8_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svmls_u8_x(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + svmls_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svmls_u8_z(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + unsafe { svmls_s16_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmls_u16_m(pg, op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + svmls_u16_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmls_u16_x(pg, op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + svmls_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmls_u16_z(pg, op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe { svmls_s32_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmls_u32_m(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + svmls_u32_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmls_u32_x(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + svmls_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmls_u32_z(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe { svmls_s64_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svmls_u64_m(pg, op1, op2, svdup_n_u64(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + svmls_u64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svmls_u64_x(pg, op1, op2, svdup_n_u64(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + svmls_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2, op3) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls))] +pub fn svmls_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svmls_u64_z(pg, op1, op2, svdup_n_u64(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmls, IMM_INDEX = 0))] +pub fn svmls_lane_f32( + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fmls.lane.nxv4f32" + )] + fn _svmls_lane_f32( + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, + IMM_INDEX: i32, + ) -> svfloat32_t; + } + unsafe { _svmls_lane_f32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmls, IMM_INDEX = 0))] +pub fn svmls_lane_f64( + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fmls.lane.nxv2f64" + )] + fn _svmls_lane_f64( + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, + IMM_INDEX: i32, + ) -> svfloat64_t; + } + unsafe { _svmls_lane_f64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Matrix multiply-accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmmla[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve,f32mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmmla))] +pub fn svmmla_f32(op1: svfloat32_t, op2: svfloat32_t, op3: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmmla.nxv4f32")] + fn _svmmla_f32(op1: svfloat32_t, op2: svfloat32_t, op3: svfloat32_t) -> svfloat32_t; + } + unsafe { _svmmla_f32(op1, op2, op3) } +} +#[doc = "Matrix multiply-accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmmla[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmmla))] +pub fn svmmla_f64(op1: svfloat64_t, op2: svfloat64_t, op3: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmmla.nxv2f64")] + fn _svmmla_f64(op1: svfloat64_t, op2: svfloat64_t, op3: svfloat64_t) -> svfloat64_t; + } + unsafe { _svmmla_f64(op1, op2, op3) } +} +#[doc = "Matrix multiply-accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmmla[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,i8mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smmla))] +pub fn svmmla_s32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smmla.nxv4i32")] + fn _svmmla_s32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t; + } + unsafe { _svmmla_s32(op1, op2, op3) } +} +#[doc = "Matrix multiply-accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmmla[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,i8mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ummla))] +pub fn svmmla_u32(op1: svuint32_t, op2: svuint8_t, op3: svuint8_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ummla.nxv4i32")] + fn _svmmla_u32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t; + } + unsafe { _svmmla_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Move"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmov[_b]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mov))] +pub fn svmov_b_z(pg: svbool_t, op: svbool_t) -> svbool_t { + svand_b_z(pg, op, op) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_f32_m( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmsb.nxv4f32")] + fn _svmsb_f32_m( + pg: svbool4_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, + ) -> svfloat32_t; + } + unsafe { _svmsb_f32_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svmsb_f32_m(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_f32_x( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svmsb_f32_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svmsb_f32_x(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_f32_z( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svmsb_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svmsb_f32_z(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_f64_m( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmsb.nxv2f64")] + fn _svmsb_f64_m( + pg: svbool2_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, + ) -> svfloat64_t; + } + unsafe { _svmsb_f64_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svmsb_f64_m(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_f64_x( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svmsb_f64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svmsb_f64_x(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_f64_z( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svmsb_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmsb))] +pub fn svmsb_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svmsb_f64_z(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.msb.nxv16i8")] + fn _svmsb_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _svmsb_s8_m(pg, op1, op2, op3) } +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svmsb_s8_m(pg, op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + svmsb_s8_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svmsb_s8_x(pg, op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + svmsb_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svmsb_s8_z(pg, op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.msb.nxv8i16")] + fn _svmsb_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) + -> svint16_t; + } + unsafe { _svmsb_s16_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svmsb_s16_m(pg, op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + svmsb_s16_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svmsb_s16_x(pg, op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + svmsb_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svmsb_s16_z(pg, op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.msb.nxv4i32")] + fn _svmsb_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) + -> svint32_t; + } + unsafe { _svmsb_s32_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svmsb_s32_m(pg, op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + svmsb_s32_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svmsb_s32_x(pg, op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + svmsb_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svmsb_s32_z(pg, op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.msb.nxv2i64")] + fn _svmsb_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) + -> svint64_t; + } + unsafe { _svmsb_s64_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svmsb_s64_m(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + svmsb_s64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svmsb_s64_x(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + svmsb_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svmsb_s64_z(pg, op1, op2, svdup_n_s64(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + unsafe { svmsb_s8_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svmsb_u8_m(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + svmsb_u8_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svmsb_u8_x(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + svmsb_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svmsb_u8_z(pg, op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + unsafe { svmsb_s16_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmsb_u16_m(pg, op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + svmsb_u16_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmsb_u16_x(pg, op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + svmsb_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svmsb_u16_z(pg, op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe { svmsb_s32_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmsb_u32_m(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + svmsb_u32_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmsb_u32_x(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + svmsb_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svmsb_u32_z(pg, op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe { svmsb_s64_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svmsb_u64_m(pg, op1, op2, svdup_n_u64(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + svmsb_u64_m(pg, op1, op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svmsb_u64_x(pg, op1, op2, svdup_n_u64(op3)) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + svmsb_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2, op3) +} +#[doc = "Multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(msb))] +pub fn svmsb_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svmsb_u64_z(pg, op1, op2, svdup_n_u64(op3)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmul.nxv4f32")] + fn _svmul_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svmul_f32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmul_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmul_f32_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmul_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmul_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmul_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmul.nxv2f64")] + fn _svmul_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svmul_f64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmul_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmul_f64_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmul_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmul_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmul))] +pub fn svmul_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmul_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mul.nxv16i8")] + fn _svmul_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svmul_s8_m(pg, op1, op2) } +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmul_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svmul_s8_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmul_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svmul_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmul_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mul.nxv8i16")] + fn _svmul_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svmul_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmul_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svmul_s16_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmul_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svmul_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmul_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mul.nxv4i32")] + fn _svmul_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svmul_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmul_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svmul_s32_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmul_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svmul_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmul_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mul.nxv2i64")] + fn _svmul_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svmul_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmul_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svmul_s64_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmul_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svmul_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmul_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svmul_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmul_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svmul_u8_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmul_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svmul_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmul_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svmul_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmul_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svmul_u16_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmul_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svmul_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmul_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svmul_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmul_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svmul_u32_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmul_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svmul_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmul_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svmul_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmul_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svmul_u64_m(pg, op1, op2) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmul_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svmul_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul))] +pub fn svmul_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmul_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smulh.nxv16i8")] + fn _svmulh_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svmulh_s8_m(pg, op1, op2) } +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmulh_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svmulh_s8_m(pg, op1, op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmulh_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svmulh_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svmulh_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smulh.nxv8i16")] + fn _svmulh_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svmulh_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmulh_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svmulh_s16_m(pg, op1, op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmulh_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svmulh_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svmulh_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smulh.nxv4i32")] + fn _svmulh_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svmulh_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmulh_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svmulh_s32_m(pg, op1, op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmulh_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svmulh_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svmulh_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smulh.nxv2i64")] + fn _svmulh_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svmulh_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmulh_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svmulh_s64_m(pg, op1, op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmulh_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svmulh_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smulh))] +pub fn svmulh_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svmulh_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umulh.nxv16i8")] + fn _svmulh_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svmulh_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmulh_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svmulh_u8_m(pg, op1, op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmulh_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svmulh_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svmulh_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umulh.nxv8i16")] + fn _svmulh_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svmulh_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmulh_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svmulh_u16_m(pg, op1, op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmulh_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svmulh_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svmulh_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umulh.nxv4i32")] + fn _svmulh_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svmulh_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmulh_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svmulh_u32_m(pg, op1, op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmulh_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svmulh_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svmulh_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umulh.nxv2i64")] + fn _svmulh_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svmulh_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmulh_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svmulh_u64_m(pg, op1, op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmulh_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svmulh_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Multiply, returning high-half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umulh))] +pub fn svmulh_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svmulh_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Multiply extended (∞×0=2)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmulx))] +pub fn svmulx_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmulx.nxv4f32")] + fn _svmulx_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svmulx_f32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Multiply extended (∞×0=2)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_n_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmulx))] +pub fn svmulx_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmulx_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Multiply extended (∞×0=2)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmulx))] +pub fn svmulx_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmulx_f32_m(pg, op1, op2) +} +#[doc = "Multiply extended (∞×0=2)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_n_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmulx))] +pub fn svmulx_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmulx_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Multiply extended (∞×0=2)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmulx))] +pub fn svmulx_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmulx_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Multiply extended (∞×0=2)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_n_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmulx))] +pub fn svmulx_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svmulx_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Multiply extended (∞×0=2)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmulx))] +pub fn svmulx_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmulx.nxv2f64")] + fn _svmulx_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svmulx_f64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Multiply extended (∞×0=2)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_n_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmulx))] +pub fn svmulx_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmulx_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Multiply extended (∞×0=2)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmulx))] +pub fn svmulx_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmulx_f64_m(pg, op1, op2) +} +#[doc = "Multiply extended (∞×0=2)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_n_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmulx))] +pub fn svmulx_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmulx_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Multiply extended (∞×0=2)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmulx))] +pub fn svmulx_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmulx_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Multiply extended (∞×0=2)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_n_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmulx))] +pub fn svmulx_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svmulx_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Bitwise NAND"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnand[_b]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nand))] +pub fn svnand_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nand.z.nxv16i1")] + fn _svnand_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svnand_b_z(pg, op1, op2) } +} +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fneg))] +pub fn svneg_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fneg.nxv4f32")] + fn _svneg_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svneg_f32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fneg))] +pub fn svneg_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svneg_f32_m(op, pg, op) +} +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fneg))] +pub fn svneg_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svneg_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fneg))] +pub fn svneg_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fneg.nxv2f64")] + fn _svneg_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svneg_f64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fneg))] +pub fn svneg_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svneg_f64_m(op, pg, op) +} +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fneg))] +pub fn svneg_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svneg_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(neg))] +pub fn svneg_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.neg.nxv16i8")] + fn _svneg_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t; + } + unsafe { _svneg_s8_m(inactive, pg, op) } +} +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(neg))] +pub fn svneg_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t { + svneg_s8_m(op, pg, op) +} +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(neg))] +pub fn svneg_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t { + svneg_s8_m(svdup_n_s8(0), pg, op) +} +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(neg))] +pub fn svneg_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.neg.nxv8i16")] + fn _svneg_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svneg_s16_m(inactive, pg.sve_into(), op) } +} +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(neg))] +pub fn svneg_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t { + svneg_s16_m(op, pg, op) +} +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(neg))] +pub fn svneg_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t { + svneg_s16_m(svdup_n_s16(0), pg, op) +} +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(neg))] +pub fn svneg_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.neg.nxv4i32")] + fn _svneg_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svneg_s32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(neg))] +pub fn svneg_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { + svneg_s32_m(op, pg, op) +} +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(neg))] +pub fn svneg_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { + svneg_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(neg))] +pub fn svneg_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.neg.nxv2i64")] + fn _svneg_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svneg_s64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(neg))] +pub fn svneg_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svneg_s64_m(op, pg, op) +} +#[doc = "Negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(neg))] +pub fn svneg_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svneg_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Negated multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmad))] +pub fn svnmad_f32_m( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmad.nxv4f32")] + fn _svnmad_f32_m( + pg: svbool4_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, + ) -> svfloat32_t; + } + unsafe { _svnmad_f32_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Negated multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_n_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmad))] +pub fn svnmad_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svnmad_f32_m(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Negated multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmad))] +pub fn svnmad_f32_x( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svnmad_f32_m(pg, op1, op2, op3) +} +#[doc = "Negated multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_n_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmad))] +pub fn svnmad_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svnmad_f32_x(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Negated multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmad))] +pub fn svnmad_f32_z( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svnmad_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3) +} +#[doc = "Negated multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_n_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmad))] +pub fn svnmad_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svnmad_f32_z(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Negated multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmad))] +pub fn svnmad_f64_m( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmad.nxv2f64")] + fn _svnmad_f64_m( + pg: svbool2_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, + ) -> svfloat64_t; + } + unsafe { _svnmad_f64_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Negated multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_n_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmad))] +pub fn svnmad_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svnmad_f64_m(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Negated multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmad))] +pub fn svnmad_f64_x( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svnmad_f64_m(pg, op1, op2, op3) +} +#[doc = "Negated multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_n_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmad))] +pub fn svnmad_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svnmad_f64_x(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Negated multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmad))] +pub fn svnmad_f64_z( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svnmad_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3) +} +#[doc = "Negated multiply-add, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_n_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmad))] +pub fn svnmad_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svnmad_f64_z(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Negated multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmla))] +pub fn svnmla_f32_m( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmla.nxv4f32")] + fn _svnmla_f32_m( + pg: svbool4_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, + ) -> svfloat32_t; + } + unsafe { _svnmla_f32_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Negated multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_n_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmla))] +pub fn svnmla_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svnmla_f32_m(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Negated multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmla))] +pub fn svnmla_f32_x( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svnmla_f32_m(pg, op1, op2, op3) +} +#[doc = "Negated multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_n_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmla))] +pub fn svnmla_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svnmla_f32_x(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Negated multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmla))] +pub fn svnmla_f32_z( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svnmla_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3) +} +#[doc = "Negated multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_n_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmla))] +pub fn svnmla_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svnmla_f32_z(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Negated multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmla))] +pub fn svnmla_f64_m( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmla.nxv2f64")] + fn _svnmla_f64_m( + pg: svbool2_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, + ) -> svfloat64_t; + } + unsafe { _svnmla_f64_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Negated multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_n_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmla))] +pub fn svnmla_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svnmla_f64_m(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Negated multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmla))] +pub fn svnmla_f64_x( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svnmla_f64_m(pg, op1, op2, op3) +} +#[doc = "Negated multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_n_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmla))] +pub fn svnmla_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svnmla_f64_x(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Negated multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmla))] +pub fn svnmla_f64_z( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svnmla_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3) +} +#[doc = "Negated multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_n_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmla))] +pub fn svnmla_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svnmla_f64_z(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Negated multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmls))] +pub fn svnmls_f32_m( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmls.nxv4f32")] + fn _svnmls_f32_m( + pg: svbool4_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, + ) -> svfloat32_t; + } + unsafe { _svnmls_f32_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Negated multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_n_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmls))] +pub fn svnmls_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svnmls_f32_m(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Negated multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmls))] +pub fn svnmls_f32_x( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svnmls_f32_m(pg, op1, op2, op3) +} +#[doc = "Negated multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_n_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmls))] +pub fn svnmls_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svnmls_f32_x(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Negated multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmls))] +pub fn svnmls_f32_z( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svnmls_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3) +} +#[doc = "Negated multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_n_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmls))] +pub fn svnmls_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svnmls_f32_z(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Negated multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmls))] +pub fn svnmls_f64_m( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmls.nxv2f64")] + fn _svnmls_f64_m( + pg: svbool2_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, + ) -> svfloat64_t; + } + unsafe { _svnmls_f64_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Negated multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_n_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmls))] +pub fn svnmls_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svnmls_f64_m(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Negated multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmls))] +pub fn svnmls_f64_x( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svnmls_f64_m(pg, op1, op2, op3) +} +#[doc = "Negated multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_n_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmls))] +pub fn svnmls_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svnmls_f64_x(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Negated multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmls))] +pub fn svnmls_f64_z( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svnmls_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3) +} +#[doc = "Negated multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_n_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmls))] +pub fn svnmls_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svnmls_f64_z(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Negated multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmsb))] +pub fn svnmsb_f32_m( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmsb.nxv4f32")] + fn _svnmsb_f32_m( + pg: svbool4_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, + ) -> svfloat32_t; + } + unsafe { _svnmsb_f32_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Negated multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_n_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmsb))] +pub fn svnmsb_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svnmsb_f32_m(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Negated multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmsb))] +pub fn svnmsb_f32_x( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svnmsb_f32_m(pg, op1, op2, op3) +} +#[doc = "Negated multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_n_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmsb))] +pub fn svnmsb_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svnmsb_f32_x(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Negated multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmsb))] +pub fn svnmsb_f32_z( + pg: svbool_t, + op1: svfloat32_t, + op2: svfloat32_t, + op3: svfloat32_t, +) -> svfloat32_t { + svnmsb_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3) +} +#[doc = "Negated multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_n_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmsb))] +pub fn svnmsb_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t { + svnmsb_f32_z(pg, op1, op2, svdup_n_f32(op3)) +} +#[doc = "Negated multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmsb))] +pub fn svnmsb_f64_m( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmsb.nxv2f64")] + fn _svnmsb_f64_m( + pg: svbool2_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, + ) -> svfloat64_t; + } + unsafe { _svnmsb_f64_m(pg.sve_into(), op1, op2, op3) } +} +#[doc = "Negated multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_n_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmsb))] +pub fn svnmsb_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svnmsb_f64_m(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Negated multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmsb))] +pub fn svnmsb_f64_x( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svnmsb_f64_m(pg, op1, op2, op3) +} +#[doc = "Negated multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_n_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmsb))] +pub fn svnmsb_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svnmsb_f64_x(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Negated multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmsb))] +pub fn svnmsb_f64_z( + pg: svbool_t, + op1: svfloat64_t, + op2: svfloat64_t, + op3: svfloat64_t, +) -> svfloat64_t { + svnmsb_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3) +} +#[doc = "Negated multiply-subtract, multiplicand first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_n_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fnmsb))] +pub fn svnmsb_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t { + svnmsb_f64_z(pg, op1, op2, svdup_n_f64(op3)) +} +#[doc = "Bitwise NOR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnor[_b]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nor))] +pub fn svnor_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nor.z.nxv16i1")] + fn _svnor_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svnor_b_z(pg, op1, op2) } +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_b]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_b_z(pg: svbool_t, op: svbool_t) -> svbool_t { + sveor_b_z(pg, op, pg) +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.not.nxv16i8")] + fn _svnot_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t; + } + unsafe { _svnot_s8_m(inactive, pg, op) } +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t { + svnot_s8_m(op, pg, op) +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t { + svnot_s8_m(svdup_n_s8(0), pg, op) +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.not.nxv8i16")] + fn _svnot_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svnot_s16_m(inactive, pg.sve_into(), op) } +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t { + svnot_s16_m(op, pg, op) +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t { + svnot_s16_m(svdup_n_s16(0), pg, op) +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.not.nxv4i32")] + fn _svnot_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svnot_s32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { + svnot_s32_m(op, pg, op) +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { + svnot_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.not.nxv2i64")] + fn _svnot_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svnot_s64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svnot_s64_m(op, pg, op) +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svnot_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u8_m(inactive: svuint8_t, pg: svbool_t, op: svuint8_t) -> svuint8_t { + unsafe { svnot_s8_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u8_x(pg: svbool_t, op: svuint8_t) -> svuint8_t { + svnot_u8_m(op, pg, op) +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u8_z(pg: svbool_t, op: svuint8_t) -> svuint8_t { + svnot_u8_m(svdup_n_u8(0), pg, op) +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint16_t { + unsafe { svnot_s16_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svnot_u16_m(op, pg, op) +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svnot_u16_m(svdup_n_u16(0), pg, op) +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe { svnot_s32_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svnot_u32_m(op, pg, op) +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svnot_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe { svnot_s64_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svnot_u64_m(op, pg, op) +} +#[doc = "Bitwise invert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(not))] +pub fn svnot_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svnot_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Bitwise inclusive OR, inverting second argument"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorn[_b]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orn))] +pub fn svorn_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orn.z.nvx16i1")] + fn _svorn_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svorn_b_z(pg, op1, op2) } +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_b]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.z.nvx16i1")] + fn _svorr_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svorr_b_z(pg, op1, op2) } +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.nxv16i8")] + fn _svorr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svorr_s8_m(pg, op1, op2) } +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svorr_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svorr_s8_m(pg, op1, op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svorr_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svorr_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svorr_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.nxv8i16")] + fn _svorr_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svorr_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svorr_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svorr_s16_m(pg, op1, op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svorr_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svorr_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svorr_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.nxv4i32")] + fn _svorr_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svorr_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svorr_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svorr_s32_m(pg, op1, op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svorr_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svorr_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svorr_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.nxv2i64")] + fn _svorr_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svorr_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svorr_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svorr_s64_m(pg, op1, op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svorr_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svorr_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svorr_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svorr_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svorr_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svorr_u8_m(pg, op1, op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svorr_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svorr_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svorr_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svorr_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svorr_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svorr_u16_m(pg, op1, op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svorr_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svorr_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svorr_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svorr_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svorr_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svorr_u32_m(pg, op1, op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svorr_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svorr_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svorr_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svorr_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svorr_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svorr_u64_m(pg, op1, op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svorr_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svorr_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Bitwise inclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orr))] +pub fn svorr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svorr_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise inclusive OR reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orv))] +pub fn svorv_s8(pg: svbool_t, op: svint8_t) -> i8 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orv.nxv16i8")] + fn _svorv_s8(pg: svbool_t, op: svint8_t) -> i8; + } + unsafe { _svorv_s8(pg, op) } +} +#[doc = "Bitwise inclusive OR reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orv))] +pub fn svorv_s16(pg: svbool_t, op: svint16_t) -> i16 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orv.nxv8i16")] + fn _svorv_s16(pg: svbool8_t, op: svint16_t) -> i16; + } + unsafe { _svorv_s16(pg.sve_into(), op) } +} +#[doc = "Bitwise inclusive OR reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orv))] +pub fn svorv_s32(pg: svbool_t, op: svint32_t) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orv.nxv4i32")] + fn _svorv_s32(pg: svbool4_t, op: svint32_t) -> i32; + } + unsafe { _svorv_s32(pg.sve_into(), op) } +} +#[doc = "Bitwise inclusive OR reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orv))] +pub fn svorv_s64(pg: svbool_t, op: svint64_t) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orv.nxv2i64")] + fn _svorv_s64(pg: svbool2_t, op: svint64_t) -> i64; + } + unsafe { _svorv_s64(pg.sve_into(), op) } +} +#[doc = "Bitwise inclusive OR reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orv))] +pub fn svorv_u8(pg: svbool_t, op: svuint8_t) -> u8 { + unsafe { svorv_s8(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise inclusive OR reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orv))] +pub fn svorv_u16(pg: svbool_t, op: svuint16_t) -> u16 { + unsafe { svorv_s16(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise inclusive OR reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orv))] +pub fn svorv_u32(pg: svbool_t, op: svuint32_t) -> u32 { + unsafe { svorv_s32(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise inclusive OR reduction to scalar"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(orv))] +pub fn svorv_u64(pg: svbool_t, op: svuint64_t) -> u64 { + unsafe { svorv_s64(pg, op.as_signed()).as_unsigned() } +} +#[doc = "Set all predicate elements to false"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpfalse[_b])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pfalse))] +pub fn svpfalse_b() -> svbool_t { + svdupq_n_b8( + false, false, false, false, false, false, false, false, false, false, false, false, false, + false, false, false, + ) +} +#[doc = "Set the first active predicate element to true"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpfirst[_b])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pfirst))] +pub fn svpfirst_b(pg: svbool_t, op: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.pfirst.nxv16i1")] + fn _svpfirst_b(pg: svbool_t, op: svbool_t) -> svbool_t; + } + unsafe { _svpfirst_b(pg, op) } +} +#[doc = "Find next active predicate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpnext_b8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pnext))] +pub fn svpnext_b8(pg: svbool_t, op: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.pnext.nxv16i1")] + fn _svpnext_b8(pg: svbool_t, op: svbool_t) -> svbool_t; + } + unsafe { _svpnext_b8(pg, op) } +} +#[doc = "Find next active predicate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpnext_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pnext))] +pub fn svpnext_b16(pg: svbool_t, op: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.pnext.nxv8i1")] + fn _svpnext_b16(pg: svbool8_t, op: svbool8_t) -> svbool8_t; + } + unsafe { _svpnext_b16(pg.sve_into(), op.sve_into()).sve_into() } +} +#[doc = "Find next active predicate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpnext_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pnext))] +pub fn svpnext_b32(pg: svbool_t, op: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.pnext.nxv4i1")] + fn _svpnext_b32(pg: svbool4_t, op: svbool4_t) -> svbool4_t; + } + unsafe { _svpnext_b32(pg.sve_into(), op.sve_into()).sve_into() } +} +#[doc = "Find next active predicate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpnext_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pnext))] +pub fn svpnext_b64(pg: svbool_t, op: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.pnext.nxv2i1")] + fn _svpnext_b64(pg: svbool2_t, op: svbool2_t) -> svbool2_t; + } + unsafe { _svpnext_b64(pg.sve_into(), op.sve_into()).sve_into() } +} +#[doc = "Prefetch bytes"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfb(pg: svbool_t, base: *const T) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.prf.nxv16i1")] + fn _svprfb(pg: svbool_t, base: *const crate::ffi::c_void, op: svprfop); + } + _svprfb(pg, base as *const crate::ffi::c_void, OP) +} +#[doc = "Prefetch halfwords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfh(pg: svbool_t, base: *const T) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.prf.nxv8i1")] + fn _svprfh(pg: svbool8_t, base: *const crate::ffi::c_void, op: svprfop); + } + _svprfh(pg.sve_into(), base as *const crate::ffi::c_void, OP) +} +#[doc = "Prefetch words"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfw(pg: svbool_t, base: *const T) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.prf.nxv4i1")] + fn _svprfw(pg: svbool4_t, base: *const crate::ffi::c_void, op: svprfop); + } + _svprfw(pg.sve_into(), base as *const crate::ffi::c_void, OP) +} +#[doc = "Prefetch doublewords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfd(pg: svbool_t, base: *const T) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.prf.nxv2i1")] + fn _svprfd(pg: svbool2_t, base: *const crate::ffi::c_void, op: svprfop); + } + _svprfd(pg.sve_into(), base as *const crate::ffi::c_void, OP) +} +#[doc = "Prefetch bytes"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather_[s32]offset)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfb_gather_s32offset( + pg: svbool_t, + base: *const T, + offsets: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfb.gather.sxtw.index.nxv4i32" + )] + fn _svprfb_gather_s32offset( + pg: svbool4_t, + base: *const crate::ffi::c_void, + offsets: svint32_t, + op: svprfop, + ); + } + _svprfb_gather_s32offset( + pg.sve_into(), + base as *const crate::ffi::c_void, + offsets, + OP, + ) +} +#[doc = "Prefetch halfwords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather_[s32]index)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfh_gather_s32index( + pg: svbool_t, + base: *const T, + indices: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfh.gather.sxtw.index.nxv4i32" + )] + fn _svprfh_gather_s32index( + pg: svbool4_t, + base: *const crate::ffi::c_void, + indices: svint32_t, + op: svprfop, + ); + } + _svprfh_gather_s32index( + pg.sve_into(), + base as *const crate::ffi::c_void, + indices, + OP, + ) +} +#[doc = "Prefetch words"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather_[s32]index)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfw_gather_s32index( + pg: svbool_t, + base: *const T, + indices: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfw.gather.sxtw.index.nxv4i32" + )] + fn _svprfw_gather_s32index( + pg: svbool4_t, + base: *const crate::ffi::c_void, + indices: svint32_t, + op: svprfop, + ); + } + _svprfw_gather_s32index( + pg.sve_into(), + base as *const crate::ffi::c_void, + indices, + OP, + ) +} +#[doc = "Prefetch doublewords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather_[s32]index)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfd_gather_s32index( + pg: svbool_t, + base: *const T, + indices: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfd.gather.sxtw.index.nxv4i32" + )] + fn _svprfd_gather_s32index( + pg: svbool4_t, + base: *const crate::ffi::c_void, + indices: svint32_t, + op: svprfop, + ); + } + _svprfd_gather_s32index( + pg.sve_into(), + base as *const crate::ffi::c_void, + indices, + OP, + ) +} +#[doc = "Prefetch bytes"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather_[s64]offset)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfb_gather_s64offset( + pg: svbool_t, + base: *const T, + offsets: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfb.gather.index.nxv2i64" + )] + fn _svprfb_gather_s64offset( + pg: svbool2_t, + base: *const crate::ffi::c_void, + offsets: svint64_t, + op: svprfop, + ); + } + _svprfb_gather_s64offset( + pg.sve_into(), + base as *const crate::ffi::c_void, + offsets, + OP, + ) +} +#[doc = "Prefetch halfwords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather_[s64]index)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfh_gather_s64index( + pg: svbool_t, + base: *const T, + indices: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfh.gather.index.nxv2i64" + )] + fn _svprfh_gather_s64index( + pg: svbool2_t, + base: *const crate::ffi::c_void, + indices: svint64_t, + op: svprfop, + ); + } + _svprfh_gather_s64index( + pg.sve_into(), + base as *const crate::ffi::c_void, + indices, + OP, + ) +} +#[doc = "Prefetch words"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather_[s64]index)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfw_gather_s64index( + pg: svbool_t, + base: *const T, + indices: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfw.gather.index.nxv2i64" + )] + fn _svprfw_gather_s64index( + pg: svbool2_t, + base: *const crate::ffi::c_void, + indices: svint64_t, + op: svprfop, + ); + } + _svprfw_gather_s64index( + pg.sve_into(), + base as *const crate::ffi::c_void, + indices, + OP, + ) +} +#[doc = "Prefetch doublewords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather_[s64]index)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfd_gather_s64index( + pg: svbool_t, + base: *const T, + indices: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfd.gather.index.nxv2i64" + )] + fn _svprfd_gather_s64index( + pg: svbool2_t, + base: *const crate::ffi::c_void, + indices: svint64_t, + op: svprfop, + ); + } + _svprfd_gather_s64index( + pg.sve_into(), + base as *const crate::ffi::c_void, + indices, + OP, + ) +} +#[doc = "Prefetch bytes"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather_[u32]offset)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfb_gather_u32offset( + pg: svbool_t, + base: *const T, + offsets: svuint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfb.gather.uxtw.index.nxv4i32" + )] + fn _svprfb_gather_u32offset( + pg: svbool4_t, + base: *const crate::ffi::c_void, + offsets: svint32_t, + op: svprfop, + ); + } + _svprfb_gather_u32offset( + pg.sve_into(), + base as *const crate::ffi::c_void, + offsets.as_signed(), + OP, + ) +} +#[doc = "Prefetch halfwords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather_[u32]index)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfh_gather_u32index( + pg: svbool_t, + base: *const T, + indices: svuint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfh.gather.uxtw.index.nxv4i32" + )] + fn _svprfh_gather_u32index( + pg: svbool4_t, + base: *const crate::ffi::c_void, + indices: svint32_t, + op: svprfop, + ); + } + _svprfh_gather_u32index( + pg.sve_into(), + base as *const crate::ffi::c_void, + indices.as_signed(), + OP, + ) +} +#[doc = "Prefetch words"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather_[u32]index)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfw_gather_u32index( + pg: svbool_t, + base: *const T, + indices: svuint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfw.gather.uxtw.index.nxv4i32" + )] + fn _svprfw_gather_u32index( + pg: svbool4_t, + base: *const crate::ffi::c_void, + indices: svint32_t, + op: svprfop, + ); + } + _svprfw_gather_u32index( + pg.sve_into(), + base as *const crate::ffi::c_void, + indices.as_signed(), + OP, + ) +} +#[doc = "Prefetch doublewords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather_[u32]index)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfd_gather_u32index( + pg: svbool_t, + base: *const T, + indices: svuint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfd.gather.uxtw.index.nxv4i32" + )] + fn _svprfd_gather_u32index( + pg: svbool4_t, + base: *const crate::ffi::c_void, + indices: svint32_t, + op: svprfop, + ); + } + _svprfd_gather_u32index( + pg.sve_into(), + base as *const crate::ffi::c_void, + indices.as_signed(), + OP, + ) +} +#[doc = "Prefetch bytes"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather_[u64]offset)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfb_gather_u64offset( + pg: svbool_t, + base: *const T, + offsets: svuint64_t, +) { + svprfb_gather_s64offset::(pg, base, offsets.as_signed()) +} +#[doc = "Prefetch halfwords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather_[u64]index)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfh_gather_u64index( + pg: svbool_t, + base: *const T, + indices: svuint64_t, +) { + svprfh_gather_s64index::(pg, base, indices.as_signed()) +} +#[doc = "Prefetch words"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather_[u64]index)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfw_gather_u64index( + pg: svbool_t, + base: *const T, + indices: svuint64_t, +) { + svprfw_gather_s64index::(pg, base, indices.as_signed()) +} +#[doc = "Prefetch doublewords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather_[u64]index)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfd_gather_u64index( + pg: svbool_t, + base: *const T, + indices: svuint64_t, +) { + svprfd_gather_s64index::(pg, base, indices.as_signed()) +} +#[doc = "Prefetch bytes"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather[_u32base])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfb_gather_u32base(pg: svbool_t, bases: svuint32_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfb.gather.scalar.offset.nxv4i32" + )] + fn _svprfb_gather_u32base(pg: svbool4_t, bases: svint32_t, index: i64, op: svprfop); + } + _svprfb_gather_u32base(pg.sve_into(), bases.as_signed(), 0, OP) +} +#[doc = "Prefetch halfwords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather[_u32base])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfh_gather_u32base(pg: svbool_t, bases: svuint32_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfh.gather.scalar.offset.nxv4i32" + )] + fn _svprfh_gather_u32base(pg: svbool4_t, bases: svint32_t, index: i64, op: svprfop); + } + _svprfh_gather_u32base(pg.sve_into(), bases.as_signed(), 0, OP) +} +#[doc = "Prefetch words"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather[_u32base])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfw_gather_u32base(pg: svbool_t, bases: svuint32_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfw.gather.scalar.offset.nxv4i32" + )] + fn _svprfw_gather_u32base(pg: svbool4_t, bases: svint32_t, index: i64, op: svprfop); + } + _svprfw_gather_u32base(pg.sve_into(), bases.as_signed(), 0, OP) +} +#[doc = "Prefetch doublewords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather[_u32base])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfd_gather_u32base(pg: svbool_t, bases: svuint32_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfd.gather.scalar.offset.nxv4i32" + )] + fn _svprfd_gather_u32base(pg: svbool4_t, bases: svint32_t, index: i64, op: svprfop); + } + _svprfd_gather_u32base(pg.sve_into(), bases.as_signed(), 0, OP) +} +#[doc = "Prefetch bytes"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather[_u64base])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfb_gather_u64base(pg: svbool_t, bases: svuint64_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfb.gather.scalar.offset.nxv2i64" + )] + fn _svprfb_gather_u64base(pg: svbool2_t, bases: svint64_t, index: i64, op: svprfop); + } + _svprfb_gather_u64base(pg.sve_into(), bases.as_signed(), 0, OP) +} +#[doc = "Prefetch halfwords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather[_u64base])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfh_gather_u64base(pg: svbool_t, bases: svuint64_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfh.gather.scalar.offset.nxv2i64" + )] + fn _svprfh_gather_u64base(pg: svbool2_t, bases: svint64_t, index: i64, op: svprfop); + } + _svprfh_gather_u64base(pg.sve_into(), bases.as_signed(), 0, OP) +} +#[doc = "Prefetch words"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather[_u64base])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfw_gather_u64base(pg: svbool_t, bases: svuint64_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfw.gather.scalar.offset.nxv2i64" + )] + fn _svprfw_gather_u64base(pg: svbool2_t, bases: svint64_t, index: i64, op: svprfop); + } + _svprfw_gather_u64base(pg.sve_into(), bases.as_signed(), 0, OP) +} +#[doc = "Prefetch doublewords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather[_u64base])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfd_gather_u64base(pg: svbool_t, bases: svuint64_t) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfd.gather.scalar.offset.nxv2i64" + )] + fn _svprfd_gather_u64base(pg: svbool2_t, bases: svint64_t, index: i64, op: svprfop); + } + _svprfd_gather_u64base(pg.sve_into(), bases.as_signed(), 0, OP) +} +#[doc = "Prefetch bytes"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather[_u32base]_offset)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfb_gather_u32base_offset( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfb.gather.scalar.offset.nxv4i32" + )] + fn _svprfb_gather_u32base_offset(pg: svbool4_t, bases: svint32_t, offset: i64, op: svprfop); + } + _svprfb_gather_u32base_offset(pg.sve_into(), bases.as_signed(), offset, OP) +} +#[doc = "Prefetch halfwords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather[_u32base]_index)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfh_gather_u32base_index( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfh.gather.scalar.offset.nxv4i32" + )] + fn _svprfh_gather_u32base_index(pg: svbool4_t, bases: svint32_t, index: i64, op: svprfop); + } + _svprfh_gather_u32base_index(pg.sve_into(), bases.as_signed(), index.unchecked_shl(1), OP) +} +#[doc = "Prefetch words"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather[_u32base]_index)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfw_gather_u32base_index( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfw.gather.scalar.offset.nxv4i32" + )] + fn _svprfw_gather_u32base_index(pg: svbool4_t, bases: svint32_t, index: i64, op: svprfop); + } + _svprfw_gather_u32base_index(pg.sve_into(), bases.as_signed(), index.unchecked_shl(2), OP) +} +#[doc = "Prefetch doublewords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather[_u32base]_index)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfd_gather_u32base_index( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfd.gather.scalar.offset.nxv4i32" + )] + fn _svprfd_gather_u32base_index(pg: svbool4_t, bases: svint32_t, index: i64, op: svprfop); + } + _svprfd_gather_u32base_index(pg.sve_into(), bases.as_signed(), index.unchecked_shl(3), OP) +} +#[doc = "Prefetch bytes"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather[_u64base]_offset)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfb_gather_u64base_offset( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfb.gather.scalar.offset.nxv2i64" + )] + fn _svprfb_gather_u64base_offset(pg: svbool2_t, bases: svint64_t, offset: i64, op: svprfop); + } + _svprfb_gather_u64base_offset(pg.sve_into(), bases.as_signed(), offset, OP) +} +#[doc = "Prefetch halfwords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather[_u64base]_index)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfh_gather_u64base_index( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfh.gather.scalar.offset.nxv2i64" + )] + fn _svprfh_gather_u64base_index(pg: svbool2_t, bases: svint64_t, index: i64, op: svprfop); + } + _svprfh_gather_u64base_index(pg.sve_into(), bases.as_signed(), index.unchecked_shl(1), OP) +} +#[doc = "Prefetch words"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather[_u64base]_index)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfw_gather_u64base_index( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfw.gather.scalar.offset.nxv2i64" + )] + fn _svprfw_gather_u64base_index(pg: svbool2_t, bases: svint64_t, index: i64, op: svprfop); + } + _svprfw_gather_u64base_index(pg.sve_into(), bases.as_signed(), index.unchecked_shl(2), OP) +} +#[doc = "Prefetch doublewords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather[_u64base]_index)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] +pub unsafe fn svprfd_gather_u64base_index( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.prfd.gather.scalar.offset.nxv2i64" + )] + fn _svprfd_gather_u64base_index(pg: svbool2_t, bases: svint64_t, index: i64, op: svprfop); + } + _svprfd_gather_u64base_index(pg.sve_into(), bases.as_signed(), index.unchecked_shl(3), OP) +} +#[doc = "Prefetch bytes"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_vnum)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfb_vnum(pg: svbool_t, base: *const T, vnum: i64) { + svprfb::(pg, base.offset(svcntb() as isize * vnum as isize)) +} +#[doc = "Prefetch halfwords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_vnum)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfh_vnum(pg: svbool_t, base: *const T, vnum: i64) { + svprfh::(pg, base.offset(svcnth() as isize * vnum as isize)) +} +#[doc = "Prefetch words"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_vnum)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfw_vnum(pg: svbool_t, base: *const T, vnum: i64) { + svprfw::(pg, base.offset(svcntw() as isize * vnum as isize)) +} +#[doc = "Prefetch doublewords"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_vnum)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] +pub unsafe fn svprfd_vnum(pg: svbool_t, base: *const T, vnum: i64) { + svprfd::(pg, base.offset(svcntd() as isize * vnum as isize)) +} +#[doc = "Test whether any active element is true"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptest_any)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ptest))] +pub fn svptest_any(pg: svbool_t, op: svbool_t) -> bool { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ptest.any.nxv16i1" + )] + fn _svptest_any(pg: svbool_t, op: svbool_t) -> bool; + } + unsafe { _svptest_any(pg, op) } +} +#[doc = "Test whether first active element is true"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptest_first)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ptest))] +pub fn svptest_first(pg: svbool_t, op: svbool_t) -> bool { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ptest.first.nxv16i1" + )] + fn _svptest_first(pg: svbool_t, op: svbool_t) -> bool; + } + unsafe { _svptest_first(pg, op) } +} +#[doc = "Test whether last active element is true"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptest_last)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ptest))] +pub fn svptest_last(pg: svbool_t, op: svbool_t) -> bool { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ptest.last.nxv16i1" + )] + fn _svptest_last(pg: svbool_t, op: svbool_t) -> bool; + } + unsafe { _svptest_last(pg, op) } +} +#[doc = "Set predicate elements to true"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_b8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ptrue))] +pub fn svptrue_b8() -> svbool_t { + svptrue_pat_b8::<{ svpattern::SV_ALL }>() +} +#[doc = "Set predicate elements to true"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ptrue))] +pub fn svptrue_b16() -> svbool_t { + svptrue_pat_b16::<{ svpattern::SV_ALL }>() +} +#[doc = "Set predicate elements to true"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ptrue))] +pub fn svptrue_b32() -> svbool_t { + svptrue_pat_b32::<{ svpattern::SV_ALL }>() +} +#[doc = "Set predicate elements to true"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ptrue))] +pub fn svptrue_b64() -> svbool_t { + svptrue_pat_b64::<{ svpattern::SV_ALL }>() +} +#[doc = "Set predicate elements to true"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))] +pub fn svptrue_pat_b8() -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ptrue.nxv16i1")] + fn _svptrue_pat_b8(pattern: svpattern) -> svbool_t; + } + unsafe { _svptrue_pat_b8(PATTERN) } +} +#[doc = "Set predicate elements to true"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))] +pub fn svptrue_pat_b16() -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ptrue.nxv8i1")] + fn _svptrue_pat_b16(pattern: svpattern) -> svbool8_t; + } + unsafe { _svptrue_pat_b16(PATTERN).sve_into() } +} +#[doc = "Set predicate elements to true"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))] +pub fn svptrue_pat_b32() -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ptrue.nxv4i1")] + fn _svptrue_pat_b32(pattern: svpattern) -> svbool4_t; + } + unsafe { _svptrue_pat_b32(PATTERN).sve_into() } +} +#[doc = "Set predicate elements to true"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))] +pub fn svptrue_pat_b64() -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ptrue.nxv2i1")] + fn _svptrue_pat_b64(pattern: svpattern) -> svbool2_t; + } + unsafe { _svptrue_pat_b64(PATTERN).sve_into() } +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqadd.x.nxv16i8" + )] + fn _svqadd_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqadd_s8(op1, op2) } +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s8(op1: svint8_t, op2: i8) -> svint8_t { + svqadd_s8(op1, svdup_n_s8(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqadd.x.nxv8i16" + )] + fn _svqadd_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqadd_s16(op1, op2) } +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s16(op1: svint16_t, op2: i16) -> svint16_t { + svqadd_s16(op1, svdup_n_s16(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqadd.x.nxv4i32" + )] + fn _svqadd_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqadd_s32(op1, op2) } +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s32(op1: svint32_t, op2: i32) -> svint32_t { + svqadd_s32(op1, svdup_n_s32(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqadd.x.nxv2i64" + )] + fn _svqadd_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqadd_s64(op1, op2) } +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s64(op1: svint64_t, op2: i64) -> svint64_t { + svqadd_s64(op1, svdup_n_s64(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqadd.x.nxv16i8" + )] + fn _svqadd_u8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqadd_u8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t { + svqadd_u8(op1, svdup_n_u8(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqadd.x.nxv8i16" + )] + fn _svqadd_u16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqadd_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u16(op1: svuint16_t, op2: u16) -> svuint16_t { + svqadd_u16(op1, svdup_n_u16(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqadd.x.nxv4i32" + )] + fn _svqadd_u32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqadd_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t { + svqadd_u32(op1, svdup_n_u32(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqadd.x.nxv2i64" + )] + fn _svqadd_u64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqadd_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t { + svqadd_u64(op1, svdup_n_u64(op2)) +} +#[doc = "Saturating decrement by number of byte elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdecb, IMM_FACTOR = 1))] +pub fn svqdecb_n_s32(op: i32) -> i32 { + svqdecb_pat_n_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdech, IMM_FACTOR = 1))] +pub fn svqdech_n_s32(op: i32) -> i32 { + svqdech_pat_n_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdecw, IMM_FACTOR = 1))] +pub fn svqdecw_n_s32(op: i32) -> i32 { + svqdecw_pat_n_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdecd, IMM_FACTOR = 1))] +pub fn svqdecd_n_s32(op: i32) -> i32 { + svqdecd_pat_n_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of byte elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdecb, IMM_FACTOR = 1))] +pub fn svqdecb_n_s64(op: i64) -> i64 { + svqdecb_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdech, IMM_FACTOR = 1))] +pub fn svqdech_n_s64(op: i64) -> i64 { + svqdech_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdecw, IMM_FACTOR = 1))] +pub fn svqdecw_n_s64(op: i64) -> i64 { + svqdecw_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdecd, IMM_FACTOR = 1))] +pub fn svqdecd_n_s64(op: i64) -> i64 { + svqdecd_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of byte elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecb, IMM_FACTOR = 1))] +pub fn svqdecb_n_u32(op: u32) -> u32 { + svqdecb_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdech, IMM_FACTOR = 1))] +pub fn svqdech_n_u32(op: u32) -> u32 { + svqdech_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecw, IMM_FACTOR = 1))] +pub fn svqdecw_n_u32(op: u32) -> u32 { + svqdecw_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecd, IMM_FACTOR = 1))] +pub fn svqdecd_n_u32(op: u32) -> u32 { + svqdecd_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of byte elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecb, IMM_FACTOR = 1))] +pub fn svqdecb_n_u64(op: u64) -> u64 { + svqdecb_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdech, IMM_FACTOR = 1))] +pub fn svqdech_n_u64(op: u64) -> u64 { + svqdech_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecw, IMM_FACTOR = 1))] +pub fn svqdecw_n_u64(op: u64) -> u64 { + svqdecw_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecd, IMM_FACTOR = 1))] +pub fn svqdecd_n_u64(op: u64) -> u64 { + svqdecd_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of byte elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb_pat[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqdecb , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecb_pat_n_s32(op: i32) -> i32 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecb.n32")] + fn _svqdecb_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqdecb_pat_n_s32(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech_pat[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqdech , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdech_pat_n_s32(op: i32) -> i32 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdech.n32")] + fn _svqdech_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqdech_pat_n_s32(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw_pat[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqdecw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecw_pat_n_s32(op: i32) -> i32 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecw.n32")] + fn _svqdecw_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqdecw_pat_n_s32(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd_pat[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqdecd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecd_pat_n_s32(op: i32) -> i32 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecd.n32")] + fn _svqdecd_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqdecd_pat_n_s32(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating decrement by number of byte elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb_pat[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqdecb , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecb_pat_n_s64(op: i64) -> i64 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecb.n64")] + fn _svqdecb_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqdecb_pat_n_s64(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech_pat[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqdech , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdech_pat_n_s64(op: i64) -> i64 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdech.n64")] + fn _svqdech_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqdech_pat_n_s64(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw_pat[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqdecw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecw_pat_n_s64(op: i64) -> i64 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecw.n64")] + fn _svqdecw_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqdecw_pat_n_s64(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd_pat[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqdecd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecd_pat_n_s64(op: i64) -> i64 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecd.n64")] + fn _svqdecd_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqdecd_pat_n_s64(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating decrement by number of byte elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb_pat[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqdecb , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecb_pat_n_u32(op: u32) -> u32 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecb.n32")] + fn _svqdecb_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqdecb_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech_pat[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqdech , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdech_pat_n_u32(op: u32) -> u32 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdech.n32")] + fn _svqdech_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqdech_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw_pat[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqdecw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecw_pat_n_u32(op: u32) -> u32 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecw.n32")] + fn _svqdecw_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqdecw_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd_pat[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqdecd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecd_pat_n_u32(op: u32) -> u32 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecd.n32")] + fn _svqdecd_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqdecd_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating decrement by number of byte elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb_pat[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqdecb , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecb_pat_n_u64(op: u64) -> u64 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecb.n64")] + fn _svqdecb_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqdecb_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech_pat[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqdech , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdech_pat_n_u64(op: u64) -> u64 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdech.n64")] + fn _svqdech_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqdech_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw_pat[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqdecw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecw_pat_n_u64(op: u64) -> u64 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecw.n64")] + fn _svqdecw_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqdecw_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd_pat[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqdecd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecd_pat_n_u64(op: u64) -> u64 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecd.n64")] + fn _svqdecd_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqdecd_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech_pat[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqdech , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdech_pat_s16( + op: svint16_t, +) -> svint16_t { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdech.nxv8i16")] + fn _svqdech_pat_s16(op: svint16_t, pattern: svpattern, imm_factor: i32) -> svint16_t; + } + unsafe { _svqdech_pat_s16(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw_pat[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqdecw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecw_pat_s32( + op: svint32_t, +) -> svint32_t { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecw.nxv4i32")] + fn _svqdecw_pat_s32(op: svint32_t, pattern: svpattern, imm_factor: i32) -> svint32_t; + } + unsafe { _svqdecw_pat_s32(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd_pat[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqdecd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecd_pat_s64( + op: svint64_t, +) -> svint64_t { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecd.nxv2i64")] + fn _svqdecd_pat_s64(op: svint64_t, pattern: svpattern, imm_factor: i32) -> svint64_t; + } + unsafe { _svqdecd_pat_s64(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech_pat[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqdech , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdech_pat_u16( + op: svuint16_t, +) -> svuint16_t { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdech.nxv8i16")] + fn _svqdech_pat_u16(op: svint16_t, pattern: svpattern, imm_factor: i32) -> svint16_t; + } + unsafe { _svqdech_pat_u16(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw_pat[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqdecw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecw_pat_u32( + op: svuint32_t, +) -> svuint32_t { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecw.nxv4i32")] + fn _svqdecw_pat_u32(op: svint32_t, pattern: svpattern, imm_factor: i32) -> svint32_t; + } + unsafe { _svqdecw_pat_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd_pat[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqdecd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqdecd_pat_u64( + op: svuint64_t, +) -> svuint64_t { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecd.nxv2i64")] + fn _svqdecd_pat_u64(op: svint64_t, pattern: svpattern, imm_factor: i32) -> svint64_t; + } + unsafe { _svqdecd_pat_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdech, IMM_FACTOR = 1))] +pub fn svqdech_s16(op: svint16_t) -> svint16_t { + svqdech_pat_s16::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdecw, IMM_FACTOR = 1))] +pub fn svqdecw_s32(op: svint32_t) -> svint32_t { + svqdecw_pat_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdecd, IMM_FACTOR = 1))] +pub fn svqdecd_s64(op: svint64_t) -> svint64_t { + svqdecd_pat_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdech, IMM_FACTOR = 1))] +pub fn svqdech_u16(op: svuint16_t) -> svuint16_t { + svqdech_pat_u16::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecw, IMM_FACTOR = 1))] +pub fn svqdecw_u32(op: svuint32_t) -> svuint32_t { + svqdecw_pat_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecd, IMM_FACTOR = 1))] +pub fn svqdecd_u64(op: svuint64_t) -> svuint64_t { + svqdecd_pat_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s32]_b8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdecp))] +pub fn svqdecp_n_s32_b8(op: i32, pg: svbool_t) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdecp.n32.nxv16i1" + )] + fn _svqdecp_n_s32_b8(op: i32, pg: svbool_t) -> i32; + } + unsafe { _svqdecp_n_s32_b8(op, pg) } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s32]_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdecp))] +pub fn svqdecp_n_s32_b16(op: i32, pg: svbool_t) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdecp.n32.nxv8i1" + )] + fn _svqdecp_n_s32_b16(op: i32, pg: svbool8_t) -> i32; + } + unsafe { _svqdecp_n_s32_b16(op, pg.sve_into()) } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s32]_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdecp))] +pub fn svqdecp_n_s32_b32(op: i32, pg: svbool_t) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdecp.n32.nxv4i1" + )] + fn _svqdecp_n_s32_b32(op: i32, pg: svbool4_t) -> i32; + } + unsafe { _svqdecp_n_s32_b32(op, pg.sve_into()) } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s32]_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdecp))] +pub fn svqdecp_n_s32_b64(op: i32, pg: svbool_t) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdecp.n32.nxv2i1" + )] + fn _svqdecp_n_s32_b64(op: i32, pg: svbool2_t) -> i32; + } + unsafe { _svqdecp_n_s32_b64(op, pg.sve_into()) } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s64]_b8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdecp))] +pub fn svqdecp_n_s64_b8(op: i64, pg: svbool_t) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdecp.n64.nxv16i1" + )] + fn _svqdecp_n_s64_b8(op: i64, pg: svbool_t) -> i64; + } + unsafe { _svqdecp_n_s64_b8(op, pg) } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s64]_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdecp))] +pub fn svqdecp_n_s64_b16(op: i64, pg: svbool_t) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdecp.n64.nxv8i1" + )] + fn _svqdecp_n_s64_b16(op: i64, pg: svbool8_t) -> i64; + } + unsafe { _svqdecp_n_s64_b16(op, pg.sve_into()) } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s64]_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdecp))] +pub fn svqdecp_n_s64_b32(op: i64, pg: svbool_t) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdecp.n64.nxv4i1" + )] + fn _svqdecp_n_s64_b32(op: i64, pg: svbool4_t) -> i64; + } + unsafe { _svqdecp_n_s64_b32(op, pg.sve_into()) } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s64]_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdecp))] +pub fn svqdecp_n_s64_b64(op: i64, pg: svbool_t) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdecp.n64.nxv2i1" + )] + fn _svqdecp_n_s64_b64(op: i64, pg: svbool2_t) -> i64; + } + unsafe { _svqdecp_n_s64_b64(op, pg.sve_into()) } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u32]_b8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecp))] +pub fn svqdecp_n_u32_b8(op: u32, pg: svbool_t) -> u32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqdecp.n32.nxv16i1" + )] + fn _svqdecp_n_u32_b8(op: i32, pg: svbool_t) -> i32; + } + unsafe { _svqdecp_n_u32_b8(op.as_signed(), pg).as_unsigned() } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u32]_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecp))] +pub fn svqdecp_n_u32_b16(op: u32, pg: svbool_t) -> u32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqdecp.n32.nxv8i1" + )] + fn _svqdecp_n_u32_b16(op: i32, pg: svbool8_t) -> i32; + } + unsafe { _svqdecp_n_u32_b16(op.as_signed(), pg.sve_into()).as_unsigned() } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u32]_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecp))] +pub fn svqdecp_n_u32_b32(op: u32, pg: svbool_t) -> u32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqdecp.n32.nxv4i1" + )] + fn _svqdecp_n_u32_b32(op: i32, pg: svbool4_t) -> i32; + } + unsafe { _svqdecp_n_u32_b32(op.as_signed(), pg.sve_into()).as_unsigned() } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u32]_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecp))] +pub fn svqdecp_n_u32_b64(op: u32, pg: svbool_t) -> u32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqdecp.n32.nxv2i1" + )] + fn _svqdecp_n_u32_b64(op: i32, pg: svbool2_t) -> i32; + } + unsafe { _svqdecp_n_u32_b64(op.as_signed(), pg.sve_into()).as_unsigned() } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u64]_b8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecp))] +pub fn svqdecp_n_u64_b8(op: u64, pg: svbool_t) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqdecp.n64.nxv16i1" + )] + fn _svqdecp_n_u64_b8(op: i64, pg: svbool_t) -> i64; + } + unsafe { _svqdecp_n_u64_b8(op.as_signed(), pg).as_unsigned() } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u64]_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecp))] +pub fn svqdecp_n_u64_b16(op: u64, pg: svbool_t) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqdecp.n64.nxv8i1" + )] + fn _svqdecp_n_u64_b16(op: i64, pg: svbool8_t) -> i64; + } + unsafe { _svqdecp_n_u64_b16(op.as_signed(), pg.sve_into()).as_unsigned() } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u64]_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecp))] +pub fn svqdecp_n_u64_b32(op: u64, pg: svbool_t) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqdecp.n64.nxv4i1" + )] + fn _svqdecp_n_u64_b32(op: i64, pg: svbool4_t) -> i64; + } + unsafe { _svqdecp_n_u64_b32(op.as_signed(), pg.sve_into()).as_unsigned() } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u64]_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecp))] +pub fn svqdecp_n_u64_b64(op: u64, pg: svbool_t) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqdecp.n64.nxv2i1" + )] + fn _svqdecp_n_u64_b64(op: i64, pg: svbool2_t) -> i64; + } + unsafe { _svqdecp_n_u64_b64(op.as_signed(), pg.sve_into()).as_unsigned() } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdecp))] +pub fn svqdecp_s16(op: svint16_t, pg: svbool_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecp.nxv8i16")] + fn _svqdecp_s16(op: svint16_t, pg: svbool8_t) -> svint16_t; + } + unsafe { _svqdecp_s16(op, pg.sve_into()) } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdecp))] +pub fn svqdecp_s32(op: svint32_t, pg: svbool_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecp.nxv4i32")] + fn _svqdecp_s32(op: svint32_t, pg: svbool4_t) -> svint32_t; + } + unsafe { _svqdecp_s32(op, pg.sve_into()) } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdecp))] +pub fn svqdecp_s64(op: svint64_t, pg: svbool_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecp.nxv2i64")] + fn _svqdecp_s64(op: svint64_t, pg: svbool2_t) -> svint64_t; + } + unsafe { _svqdecp_s64(op, pg.sve_into()) } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecp))] +pub fn svqdecp_u16(op: svuint16_t, pg: svbool_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecp.nxv8i16")] + fn _svqdecp_u16(op: svint16_t, pg: svbool8_t) -> svint16_t; + } + unsafe { _svqdecp_u16(op.as_signed(), pg.sve_into()).as_unsigned() } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecp))] +pub fn svqdecp_u32(op: svuint32_t, pg: svbool_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecp.nxv4i32")] + fn _svqdecp_u32(op: svint32_t, pg: svbool4_t) -> svint32_t; + } + unsafe { _svqdecp_u32(op.as_signed(), pg.sve_into()).as_unsigned() } +} +#[doc = "Saturating decrement by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqdecp))] +pub fn svqdecp_u64(op: svuint64_t, pg: svbool_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecp.nxv2i64")] + fn _svqdecp_u64(op: svint64_t, pg: svbool2_t) -> svint64_t; + } + unsafe { _svqdecp_u64(op.as_signed(), pg.sve_into()).as_unsigned() } +} +#[doc = "Saturating increment by number of byte elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqincb, IMM_FACTOR = 1))] +pub fn svqincb_n_s32(op: i32) -> i32 { + svqincb_pat_n_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqinch, IMM_FACTOR = 1))] +pub fn svqinch_n_s32(op: i32) -> i32 { + svqinch_pat_n_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqincw, IMM_FACTOR = 1))] +pub fn svqincw_n_s32(op: i32) -> i32 { + svqincw_pat_n_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqincd, IMM_FACTOR = 1))] +pub fn svqincd_n_s32(op: i32) -> i32 { + svqincd_pat_n_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of byte elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqincb, IMM_FACTOR = 1))] +pub fn svqincb_n_s64(op: i64) -> i64 { + svqincb_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqinch, IMM_FACTOR = 1))] +pub fn svqinch_n_s64(op: i64) -> i64 { + svqinch_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqincw, IMM_FACTOR = 1))] +pub fn svqincw_n_s64(op: i64) -> i64 { + svqincw_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqincd, IMM_FACTOR = 1))] +pub fn svqincd_n_s64(op: i64) -> i64 { + svqincd_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of byte elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincb, IMM_FACTOR = 1))] +pub fn svqincb_n_u32(op: u32) -> u32 { + svqincb_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqinch, IMM_FACTOR = 1))] +pub fn svqinch_n_u32(op: u32) -> u32 { + svqinch_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincw, IMM_FACTOR = 1))] +pub fn svqincw_n_u32(op: u32) -> u32 { + svqincw_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincd, IMM_FACTOR = 1))] +pub fn svqincd_n_u32(op: u32) -> u32 { + svqincd_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of byte elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincb, IMM_FACTOR = 1))] +pub fn svqincb_n_u64(op: u64) -> u64 { + svqincb_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqinch, IMM_FACTOR = 1))] +pub fn svqinch_n_u64(op: u64) -> u64 { + svqinch_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincw, IMM_FACTOR = 1))] +pub fn svqincw_n_u64(op: u64) -> u64 { + svqincw_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincd, IMM_FACTOR = 1))] +pub fn svqincd_n_u64(op: u64) -> u64 { + svqincd_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of byte elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb_pat[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqincb , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincb_pat_n_s32(op: i32) -> i32 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincb.n32")] + fn _svqincb_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqincb_pat_n_s32(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating increment by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch_pat[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqinch , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqinch_pat_n_s32(op: i32) -> i32 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqinch.n32")] + fn _svqinch_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqinch_pat_n_s32(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating increment by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw_pat[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqincw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincw_pat_n_s32(op: i32) -> i32 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincw.n32")] + fn _svqincw_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqincw_pat_n_s32(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating increment by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd_pat[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqincd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincd_pat_n_s32(op: i32) -> i32 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincd.n32")] + fn _svqincd_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqincd_pat_n_s32(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating increment by number of byte elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb_pat[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqincb , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincb_pat_n_s64(op: i64) -> i64 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincb.n64")] + fn _svqincb_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqincb_pat_n_s64(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating increment by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch_pat[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqinch , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqinch_pat_n_s64(op: i64) -> i64 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqinch.n64")] + fn _svqinch_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqinch_pat_n_s64(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating increment by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw_pat[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqincw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincw_pat_n_s64(op: i64) -> i64 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincw.n64")] + fn _svqincw_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqincw_pat_n_s64(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating increment by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd_pat[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqincd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincd_pat_n_s64(op: i64) -> i64 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincd.n64")] + fn _svqincd_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqincd_pat_n_s64(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating increment by number of byte elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb_pat[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqincb , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincb_pat_n_u32(op: u32) -> u32 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincb.n32")] + fn _svqincb_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqincb_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating increment by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch_pat[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqinch , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqinch_pat_n_u32(op: u32) -> u32 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqinch.n32")] + fn _svqinch_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqinch_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating increment by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw_pat[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqincw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincw_pat_n_u32(op: u32) -> u32 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincw.n32")] + fn _svqincw_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqincw_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating increment by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd_pat[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqincd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincd_pat_n_u32(op: u32) -> u32 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincd.n32")] + fn _svqincd_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32; + } + unsafe { _svqincd_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating increment by number of byte elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb_pat[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqincb , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincb_pat_n_u64(op: u64) -> u64 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincb.n64")] + fn _svqincb_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqincb_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating increment by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch_pat[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqinch , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqinch_pat_n_u64(op: u64) -> u64 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqinch.n64")] + fn _svqinch_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqinch_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating increment by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw_pat[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqincw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincw_pat_n_u64(op: u64) -> u64 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincw.n64")] + fn _svqincw_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqincw_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating increment by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd_pat[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqincd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincd_pat_n_u64(op: u64) -> u64 { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincd.n64")] + fn _svqincd_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64; + } + unsafe { _svqincd_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating increment by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch_pat[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqinch , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqinch_pat_s16( + op: svint16_t, +) -> svint16_t { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqinch.nxv8i16")] + fn _svqinch_pat_s16(op: svint16_t, pattern: svpattern, imm_factor: i32) -> svint16_t; + } + unsafe { _svqinch_pat_s16(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating increment by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw_pat[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqincw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincw_pat_s32( + op: svint32_t, +) -> svint32_t { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincw.nxv4i32")] + fn _svqincw_pat_s32(op: svint32_t, pattern: svpattern, imm_factor: i32) -> svint32_t; + } + unsafe { _svqincw_pat_s32(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating increment by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd_pat[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (sqincd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincd_pat_s64( + op: svint64_t, +) -> svint64_t { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincd.nxv2i64")] + fn _svqincd_pat_s64(op: svint64_t, pattern: svpattern, imm_factor: i32) -> svint64_t; + } + unsafe { _svqincd_pat_s64(op, PATTERN, IMM_FACTOR) } +} +#[doc = "Saturating increment by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch_pat[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqinch , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqinch_pat_u16( + op: svuint16_t, +) -> svuint16_t { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqinch.nxv8i16")] + fn _svqinch_pat_u16(op: svint16_t, pattern: svpattern, imm_factor: i32) -> svint16_t; + } + unsafe { _svqinch_pat_u16(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating increment by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw_pat[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqincw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincw_pat_u32( + op: svuint32_t, +) -> svuint32_t { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincw.nxv4i32")] + fn _svqincw_pat_u32(op: svint32_t, pattern: svpattern, imm_factor: i32) -> svint32_t; + } + unsafe { _svqincw_pat_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating increment by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd_pat[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +# [cfg_attr (test , assert_instr (uqincd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] +pub fn svqincd_pat_u64( + op: svuint64_t, +) -> svuint64_t { + static_assert_range!(IMM_FACTOR, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincd.nxv2i64")] + fn _svqincd_pat_u64(op: svint64_t, pattern: svpattern, imm_factor: i32) -> svint64_t; + } + unsafe { _svqincd_pat_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() } +} +#[doc = "Saturating increment by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqinch, IMM_FACTOR = 1))] +pub fn svqinch_s16(op: svint16_t) -> svint16_t { + svqinch_pat_s16::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqincw, IMM_FACTOR = 1))] +pub fn svqincw_s32(op: svint32_t) -> svint32_t { + svqincw_pat_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqincd, IMM_FACTOR = 1))] +pub fn svqincd_s64(op: svint64_t) -> svint64_t { + svqincd_pat_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of halfword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqinch, IMM_FACTOR = 1))] +pub fn svqinch_u16(op: svuint16_t) -> svuint16_t { + svqinch_pat_u16::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of word elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincw, IMM_FACTOR = 1))] +pub fn svqincw_u32(op: svuint32_t) -> svuint32_t { + svqincw_pat_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by number of doubleword elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincd, IMM_FACTOR = 1))] +pub fn svqincd_u64(op: svuint64_t) -> svuint64_t { + svqincd_pat_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op) +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s32]_b8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqincp))] +pub fn svqincp_n_s32_b8(op: i32, pg: svbool_t) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqincp.n32.nxv16i1" + )] + fn _svqincp_n_s32_b8(op: i32, pg: svbool_t) -> i32; + } + unsafe { _svqincp_n_s32_b8(op, pg) } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s32]_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqincp))] +pub fn svqincp_n_s32_b16(op: i32, pg: svbool_t) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqincp.n32.nxv8i1" + )] + fn _svqincp_n_s32_b16(op: i32, pg: svbool8_t) -> i32; + } + unsafe { _svqincp_n_s32_b16(op, pg.sve_into()) } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s32]_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqincp))] +pub fn svqincp_n_s32_b32(op: i32, pg: svbool_t) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqincp.n32.nxv4i1" + )] + fn _svqincp_n_s32_b32(op: i32, pg: svbool4_t) -> i32; + } + unsafe { _svqincp_n_s32_b32(op, pg.sve_into()) } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s32]_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqincp))] +pub fn svqincp_n_s32_b64(op: i32, pg: svbool_t) -> i32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqincp.n32.nxv2i1" + )] + fn _svqincp_n_s32_b64(op: i32, pg: svbool2_t) -> i32; + } + unsafe { _svqincp_n_s32_b64(op, pg.sve_into()) } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s64]_b8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqincp))] +pub fn svqincp_n_s64_b8(op: i64, pg: svbool_t) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqincp.n64.nxv16i1" + )] + fn _svqincp_n_s64_b8(op: i64, pg: svbool_t) -> i64; + } + unsafe { _svqincp_n_s64_b8(op, pg) } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s64]_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqincp))] +pub fn svqincp_n_s64_b16(op: i64, pg: svbool_t) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqincp.n64.nxv8i1" + )] + fn _svqincp_n_s64_b16(op: i64, pg: svbool8_t) -> i64; + } + unsafe { _svqincp_n_s64_b16(op, pg.sve_into()) } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s64]_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqincp))] +pub fn svqincp_n_s64_b32(op: i64, pg: svbool_t) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqincp.n64.nxv4i1" + )] + fn _svqincp_n_s64_b32(op: i64, pg: svbool4_t) -> i64; + } + unsafe { _svqincp_n_s64_b32(op, pg.sve_into()) } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s64]_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqincp))] +pub fn svqincp_n_s64_b64(op: i64, pg: svbool_t) -> i64 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqincp.n64.nxv2i1" + )] + fn _svqincp_n_s64_b64(op: i64, pg: svbool2_t) -> i64; + } + unsafe { _svqincp_n_s64_b64(op, pg.sve_into()) } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u32]_b8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincp))] +pub fn svqincp_n_u32_b8(op: u32, pg: svbool_t) -> u32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqincp.n32.nxv16i1" + )] + fn _svqincp_n_u32_b8(op: i32, pg: svbool_t) -> i32; + } + unsafe { _svqincp_n_u32_b8(op.as_signed(), pg).as_unsigned() } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u32]_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincp))] +pub fn svqincp_n_u32_b16(op: u32, pg: svbool_t) -> u32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqincp.n32.nxv8i1" + )] + fn _svqincp_n_u32_b16(op: i32, pg: svbool8_t) -> i32; + } + unsafe { _svqincp_n_u32_b16(op.as_signed(), pg.sve_into()).as_unsigned() } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u32]_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincp))] +pub fn svqincp_n_u32_b32(op: u32, pg: svbool_t) -> u32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqincp.n32.nxv4i1" + )] + fn _svqincp_n_u32_b32(op: i32, pg: svbool4_t) -> i32; + } + unsafe { _svqincp_n_u32_b32(op.as_signed(), pg.sve_into()).as_unsigned() } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u32]_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincp))] +pub fn svqincp_n_u32_b64(op: u32, pg: svbool_t) -> u32 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqincp.n32.nxv2i1" + )] + fn _svqincp_n_u32_b64(op: i32, pg: svbool2_t) -> i32; + } + unsafe { _svqincp_n_u32_b64(op.as_signed(), pg.sve_into()).as_unsigned() } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u64]_b8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincp))] +pub fn svqincp_n_u64_b8(op: u64, pg: svbool_t) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqincp.n64.nxv16i1" + )] + fn _svqincp_n_u64_b8(op: i64, pg: svbool_t) -> i64; + } + unsafe { _svqincp_n_u64_b8(op.as_signed(), pg).as_unsigned() } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u64]_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincp))] +pub fn svqincp_n_u64_b16(op: u64, pg: svbool_t) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqincp.n64.nxv8i1" + )] + fn _svqincp_n_u64_b16(op: i64, pg: svbool8_t) -> i64; + } + unsafe { _svqincp_n_u64_b16(op.as_signed(), pg.sve_into()).as_unsigned() } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u64]_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincp))] +pub fn svqincp_n_u64_b32(op: u64, pg: svbool_t) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqincp.n64.nxv4i1" + )] + fn _svqincp_n_u64_b32(op: i64, pg: svbool4_t) -> i64; + } + unsafe { _svqincp_n_u64_b32(op.as_signed(), pg.sve_into()).as_unsigned() } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u64]_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincp))] +pub fn svqincp_n_u64_b64(op: u64, pg: svbool_t) -> u64 { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqincp.n64.nxv2i1" + )] + fn _svqincp_n_u64_b64(op: i64, pg: svbool2_t) -> i64; + } + unsafe { _svqincp_n_u64_b64(op.as_signed(), pg.sve_into()).as_unsigned() } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqincp))] +pub fn svqincp_s16(op: svint16_t, pg: svbool_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincp.nxv8i16")] + fn _svqincp_s16(op: svint16_t, pg: svbool8_t) -> svint16_t; + } + unsafe { _svqincp_s16(op, pg.sve_into()) } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqincp))] +pub fn svqincp_s32(op: svint32_t, pg: svbool_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincp.nxv4i32")] + fn _svqincp_s32(op: svint32_t, pg: svbool4_t) -> svint32_t; + } + unsafe { _svqincp_s32(op, pg.sve_into()) } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqincp))] +pub fn svqincp_s64(op: svint64_t, pg: svbool_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincp.nxv2i64")] + fn _svqincp_s64(op: svint64_t, pg: svbool2_t) -> svint64_t; + } + unsafe { _svqincp_s64(op, pg.sve_into()) } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincp))] +pub fn svqincp_u16(op: svuint16_t, pg: svbool_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincp.nxv8i16")] + fn _svqincp_u16(op: svint16_t, pg: svbool8_t) -> svint16_t; + } + unsafe { _svqincp_u16(op.as_signed(), pg.sve_into()).as_unsigned() } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincp))] +pub fn svqincp_u32(op: svuint32_t, pg: svbool_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincp.nxv4i32")] + fn _svqincp_u32(op: svint32_t, pg: svbool4_t) -> svint32_t; + } + unsafe { _svqincp_u32(op.as_signed(), pg.sve_into()).as_unsigned() } +} +#[doc = "Saturating increment by active element count"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqincp))] +pub fn svqincp_u64(op: svuint64_t, pg: svbool_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincp.nxv2i64")] + fn _svqincp_u64(op: svint64_t, pg: svbool2_t) -> svint64_t; + } + unsafe { _svqincp_u64(op.as_signed(), pg.sve_into()).as_unsigned() } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqsub.x.nxv16i8" + )] + fn _svqsub_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqsub_s8(op1, op2) } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s8(op1: svint8_t, op2: i8) -> svint8_t { + svqsub_s8(op1, svdup_n_s8(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqsub.x.nxv8i16" + )] + fn _svqsub_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqsub_s16(op1, op2) } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s16(op1: svint16_t, op2: i16) -> svint16_t { + svqsub_s16(op1, svdup_n_s16(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqsub.x.nxv4i32" + )] + fn _svqsub_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqsub_s32(op1, op2) } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s32(op1: svint32_t, op2: i32) -> svint32_t { + svqsub_s32(op1, svdup_n_s32(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqsub.x.nxv2i64" + )] + fn _svqsub_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqsub_s64(op1, op2) } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s64(op1: svint64_t, op2: i64) -> svint64_t { + svqsub_s64(op1, svdup_n_s64(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqsub.x.nxv16i8" + )] + fn _svqsub_u8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqsub_u8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t { + svqsub_u8(op1, svdup_n_u8(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqsub.x.nxv8i16" + )] + fn _svqsub_u16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqsub_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u16(op1: svuint16_t, op2: u16) -> svuint16_t { + svqsub_u16(op1, svdup_n_u16(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqsub.x.nxv4i32" + )] + fn _svqsub_u32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqsub_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t { + svqsub_u32(op1, svdup_n_u32(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqsub.x.nxv2i64" + )] + fn _svqsub_u64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqsub_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t { + svqsub_u64(op1, svdup_n_u64(op2)) +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rbit.nxv16i8")] + fn _svrbit_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t; + } + unsafe { _svrbit_s8_m(inactive, pg, op) } +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t { + svrbit_s8_m(op, pg, op) +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t { + svrbit_s8_m(svdup_n_s8(0), pg, op) +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rbit.nxv8i16")] + fn _svrbit_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svrbit_s16_m(inactive, pg.sve_into(), op) } +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t { + svrbit_s16_m(op, pg, op) +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t { + svrbit_s16_m(svdup_n_s16(0), pg, op) +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rbit.nxv4i32")] + fn _svrbit_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svrbit_s32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { + svrbit_s32_m(op, pg, op) +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { + svrbit_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rbit.nxv2i64")] + fn _svrbit_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svrbit_s64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svrbit_s64_m(op, pg, op) +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svrbit_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u8_m(inactive: svuint8_t, pg: svbool_t, op: svuint8_t) -> svuint8_t { + unsafe { svrbit_s8_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u8_x(pg: svbool_t, op: svuint8_t) -> svuint8_t { + svrbit_u8_m(op, pg, op) +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u8_z(pg: svbool_t, op: svuint8_t) -> svuint8_t { + svrbit_u8_m(svdup_n_u8(0), pg, op) +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint16_t { + unsafe { svrbit_s16_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svrbit_u16_m(op, pg, op) +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svrbit_u16_m(svdup_n_u16(0), pg, op) +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe { svrbit_s32_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svrbit_u32_m(op, pg, op) +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svrbit_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe { svrbit_s64_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svrbit_u64_m(op, pg, op) +} +#[doc = "Reverse bits"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rbit))] +pub fn svrbit_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svrbit_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Read FFR, returning predicate of succesfully loaded elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrdffr)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rdffr))] +pub fn svrdffr() -> svbool_t { + svrdffr_z(svptrue_b8()) +} +#[doc = "Read FFR, returning predicate of succesfully loaded elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrdffr_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rdffr))] +pub fn svrdffr_z(pg: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rdffr.z")] + fn _svrdffr_z(pg: svbool_t) -> svbool_t; + } + unsafe { _svrdffr_z(pg) } +} +#[doc = "Reciprocal estimate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpe[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frecpe))] +pub fn svrecpe_f32(op: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.frecpe.x.nxv4f32" + )] + fn _svrecpe_f32(op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrecpe_f32(op) } +} +#[doc = "Reciprocal estimate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpe[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frecpe))] +pub fn svrecpe_f64(op: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.frecpe.x.nxv2f64" + )] + fn _svrecpe_f64(op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrecpe_f64(op) } +} +#[doc = "Reciprocal step"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecps[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frecps))] +pub fn svrecps_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.frecps.x.nxv4f32" + )] + fn _svrecps_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrecps_f32(op1, op2) } +} +#[doc = "Reciprocal step"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecps[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frecps))] +pub fn svrecps_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.frecps.x.nxv2f64" + )] + fn _svrecps_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrecps_f64(op1, op2) } +} +#[doc = "Reciprocal exponent"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frecpx))] +pub fn svrecpx_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.frecpx.x.nxv4f32" + )] + fn _svrecpx_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrecpx_f32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Reciprocal exponent"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frecpx))] +pub fn svrecpx_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrecpx_f32_m(op, pg, op) +} +#[doc = "Reciprocal exponent"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frecpx))] +pub fn svrecpx_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrecpx_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Reciprocal exponent"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frecpx))] +pub fn svrecpx_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.frecpx.x.nxv2f64" + )] + fn _svrecpx_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrecpx_f64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Reciprocal exponent"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frecpx))] +pub fn svrecpx_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrecpx_f64_m(op, pg, op) +} +#[doc = "Reciprocal exponent"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frecpx))] +pub fn svrecpx_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrecpx_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f32_f32(op: svfloat32_t) -> svfloat32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f32_f64(op: svfloat64_t) -> svfloat32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f32_s8(op: svint8_t) -> svfloat32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f32_s16(op: svint16_t) -> svfloat32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f32_s32(op: svint32_t) -> svfloat32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f32_s64(op: svint64_t) -> svfloat32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f32_u8(op: svuint8_t) -> svfloat32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f32_u16(op: svuint16_t) -> svfloat32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f32_u32(op: svuint32_t) -> svfloat32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f32_u64(op: svuint64_t) -> svfloat32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f64_f32(op: svfloat32_t) -> svfloat64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f64_f64(op: svfloat64_t) -> svfloat64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f64_s8(op: svint8_t) -> svfloat64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f64_s16(op: svint16_t) -> svfloat64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f64_s32(op: svint32_t) -> svfloat64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f64_s64(op: svint64_t) -> svfloat64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f64_u8(op: svuint8_t) -> svfloat64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f64_u16(op: svuint16_t) -> svfloat64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f64_u32(op: svuint32_t) -> svfloat64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_f64_u64(op: svuint64_t) -> svfloat64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s8_f32(op: svfloat32_t) -> svint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s8_f64(op: svfloat64_t) -> svint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s8_s8(op: svint8_t) -> svint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s8_s16(op: svint16_t) -> svint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s8_s32(op: svint32_t) -> svint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s8_s64(op: svint64_t) -> svint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s8_u8(op: svuint8_t) -> svint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s8_u16(op: svuint16_t) -> svint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s8_u32(op: svuint32_t) -> svint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s8_u64(op: svuint64_t) -> svint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s16_f32(op: svfloat32_t) -> svint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s16_f64(op: svfloat64_t) -> svint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s16_s8(op: svint8_t) -> svint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s16_s16(op: svint16_t) -> svint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s16_s32(op: svint32_t) -> svint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s16_s64(op: svint64_t) -> svint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s16_u8(op: svuint8_t) -> svint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s16_u16(op: svuint16_t) -> svint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s16_u32(op: svuint32_t) -> svint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s16_u64(op: svuint64_t) -> svint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s32_f32(op: svfloat32_t) -> svint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s32_f64(op: svfloat64_t) -> svint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s32_s8(op: svint8_t) -> svint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s32_s16(op: svint16_t) -> svint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s32_s32(op: svint32_t) -> svint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s32_s64(op: svint64_t) -> svint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s32_u8(op: svuint8_t) -> svint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s32_u16(op: svuint16_t) -> svint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s32_u32(op: svuint32_t) -> svint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s32_u64(op: svuint64_t) -> svint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s64_f32(op: svfloat32_t) -> svint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s64_f64(op: svfloat64_t) -> svint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s64_s8(op: svint8_t) -> svint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s64_s16(op: svint16_t) -> svint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s64_s32(op: svint32_t) -> svint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s64_s64(op: svint64_t) -> svint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s64_u8(op: svuint8_t) -> svint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s64_u16(op: svuint16_t) -> svint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s64_u32(op: svuint32_t) -> svint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_s64_u64(op: svuint64_t) -> svint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u8_f32(op: svfloat32_t) -> svuint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u8_f64(op: svfloat64_t) -> svuint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u8_s8(op: svint8_t) -> svuint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u8_s16(op: svint16_t) -> svuint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u8_s32(op: svint32_t) -> svuint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u8_s64(op: svint64_t) -> svuint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u8_u8(op: svuint8_t) -> svuint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u8_u16(op: svuint16_t) -> svuint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u8_u32(op: svuint32_t) -> svuint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u8_u64(op: svuint64_t) -> svuint8_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u16_f32(op: svfloat32_t) -> svuint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u16_f64(op: svfloat64_t) -> svuint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u16_s8(op: svint8_t) -> svuint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u16_s16(op: svint16_t) -> svuint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u16_s32(op: svint32_t) -> svuint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u16_s64(op: svint64_t) -> svuint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u16_u8(op: svuint8_t) -> svuint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u16_u16(op: svuint16_t) -> svuint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u16_u32(op: svuint32_t) -> svuint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u16_u64(op: svuint64_t) -> svuint16_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u32_f32(op: svfloat32_t) -> svuint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u32_f64(op: svfloat64_t) -> svuint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u32_s8(op: svint8_t) -> svuint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u32_s16(op: svint16_t) -> svuint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u32_s32(op: svint32_t) -> svuint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u32_s64(op: svint64_t) -> svuint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u32_u8(op: svuint8_t) -> svuint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u32_u16(op: svuint16_t) -> svuint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u32_u32(op: svuint32_t) -> svuint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u32_u64(op: svuint64_t) -> svuint32_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u64_f32(op: svfloat32_t) -> svuint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u64_f64(op: svfloat64_t) -> svuint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u64_s8(op: svint8_t) -> svuint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u64_s16(op: svint16_t) -> svuint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u64_s32(op: svint32_t) -> svuint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u64_s64(op: svint64_t) -> svuint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u64_u8(op: svuint8_t) -> svuint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u64_u16(op: svuint16_t) -> svuint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u64_u32(op: svuint32_t) -> svuint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reinterpret vector contents"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svreinterpret_u64_u64(op: svuint64_t) -> svuint64_t { + unsafe { crate::intrinsics::transmute_unchecked(op) } +} +#[doc = "Reverse all elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev_b8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_b8(op: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv16i1")] + fn _svrev_b8(op: svbool_t) -> svbool_t; + } + unsafe { _svrev_b8(op) } +} +#[doc = "Reverse all elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_b16(op: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv8i1")] + fn _svrev_b16(op: svbool8_t) -> svbool8_t; + } + unsafe { _svrev_b16(op.sve_into()).sve_into() } +} +#[doc = "Reverse all elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_b32(op: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv4i1")] + fn _svrev_b32(op: svbool4_t) -> svbool4_t; + } + unsafe { _svrev_b32(op.sve_into()).sve_into() } +} +#[doc = "Reverse all elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_b64(op: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv2i1")] + fn _svrev_b64(op: svbool2_t) -> svbool2_t; + } + unsafe { _svrev_b64(op.sve_into()).sve_into() } +} +#[doc = "Reverse all elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_f32(op: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv4f32")] + fn _svrev_f32(op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrev_f32(op) } +} +#[doc = "Reverse all elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_f64(op: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv2f64")] + fn _svrev_f64(op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrev_f64(op) } +} +#[doc = "Reverse all elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_s8(op: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv16i8")] + fn _svrev_s8(op: svint8_t) -> svint8_t; + } + unsafe { _svrev_s8(op) } +} +#[doc = "Reverse all elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_s16(op: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv8i16")] + fn _svrev_s16(op: svint16_t) -> svint16_t; + } + unsafe { _svrev_s16(op) } +} +#[doc = "Reverse all elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_s32(op: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv4i32")] + fn _svrev_s32(op: svint32_t) -> svint32_t; + } + unsafe { _svrev_s32(op) } +} +#[doc = "Reverse all elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_s64(op: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv2i64")] + fn _svrev_s64(op: svint64_t) -> svint64_t; + } + unsafe { _svrev_s64(op) } +} +#[doc = "Reverse all elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_u8(op: svuint8_t) -> svuint8_t { + unsafe { svrev_s8(op.as_signed()).as_unsigned() } +} +#[doc = "Reverse all elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_u16(op: svuint16_t) -> svuint16_t { + unsafe { svrev_s16(op.as_signed()).as_unsigned() } +} +#[doc = "Reverse all elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_u32(op: svuint32_t) -> svuint32_t { + unsafe { svrev_s32(op.as_signed()).as_unsigned() } +} +#[doc = "Reverse all elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rev))] +pub fn svrev_u64(op: svuint64_t) -> svuint64_t { + unsafe { svrev_s64(op.as_signed()).as_unsigned() } +} +#[doc = "Reverse bytes within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.revb.nxv8i16")] + fn _svrevb_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svrevb_s16_m(inactive, pg.sve_into(), op) } +} +#[doc = "Reverse bytes within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t { + svrevb_s16_m(op, pg, op) +} +#[doc = "Reverse bytes within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t { + svrevb_s16_m(svdup_n_s16(0), pg, op) +} +#[doc = "Reverse bytes within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.revb.nxv4i32")] + fn _svrevb_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svrevb_s32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Reverse bytes within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { + svrevb_s32_m(op, pg, op) +} +#[doc = "Reverse bytes within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { + svrevb_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Reverse bytes within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.revb.nxv2i64")] + fn _svrevb_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svrevb_s64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Reverse bytes within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svrevb_s64_m(op, pg, op) +} +#[doc = "Reverse bytes within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svrevb_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Reverse bytes within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint16_t { + unsafe { svrevb_s16_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Reverse bytes within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svrevb_u16_m(op, pg, op) +} +#[doc = "Reverse bytes within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t { + svrevb_u16_m(svdup_n_u16(0), pg, op) +} +#[doc = "Reverse bytes within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe { svrevb_s32_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Reverse bytes within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svrevb_u32_m(op, pg, op) +} +#[doc = "Reverse bytes within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svrevb_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Reverse bytes within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe { svrevb_s64_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Reverse bytes within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svrevb_u64_m(op, pg, op) +} +#[doc = "Reverse bytes within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revb))] +pub fn svrevb_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svrevb_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Reverse halfwords within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.revh.nxv4i32")] + fn _svrevh_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svrevh_s32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Reverse halfwords within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { + svrevh_s32_m(op, pg, op) +} +#[doc = "Reverse halfwords within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { + svrevh_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Reverse halfwords within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.revh.nxv2i64")] + fn _svrevh_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svrevh_s64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Reverse halfwords within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svrevh_s64_m(op, pg, op) +} +#[doc = "Reverse halfwords within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svrevh_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Reverse halfwords within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe { svrevh_s32_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Reverse halfwords within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svrevh_u32_m(op, pg, op) +} +#[doc = "Reverse halfwords within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svrevh_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Reverse halfwords within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe { svrevh_s64_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Reverse halfwords within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svrevh_u64_m(op, pg, op) +} +#[doc = "Reverse halfwords within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revh))] +pub fn svrevh_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svrevh_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Reverse words within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevw[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revw))] +pub fn svrevw_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.revw.nxv2i64")] + fn _svrevw_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svrevw_s64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Reverse words within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevw[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revw))] +pub fn svrevw_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svrevw_s64_m(op, pg, op) +} +#[doc = "Reverse words within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevw[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revw))] +pub fn svrevw_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svrevw_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Reverse words within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevw[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revw))] +pub fn svrevw_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t { + unsafe { svrevw_s64_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() } +} +#[doc = "Reverse words within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevw[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revw))] +pub fn svrevw_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svrevw_u64_m(op, pg, op) +} +#[doc = "Reverse words within elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevw[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(revw))] +pub fn svrevw_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { + svrevw_u64_m(svdup_n_u64(0), pg, op) +} +#[doc = "Round to nearest, ties away from zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinta[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frinta))] +pub fn svrinta_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frinta.nxv4f32")] + fn _svrinta_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrinta_f32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Round to nearest, ties away from zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinta[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frinta))] +pub fn svrinta_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrinta_f32_m(op, pg, op) +} +#[doc = "Round to nearest, ties away from zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinta[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frinta))] +pub fn svrinta_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrinta_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Round to nearest, ties away from zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinta[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frinta))] +pub fn svrinta_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frinta.nxv2f64")] + fn _svrinta_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrinta_f64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Round to nearest, ties away from zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinta[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frinta))] +pub fn svrinta_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrinta_f64_m(op, pg, op) +} +#[doc = "Round to nearest, ties away from zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinta[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frinta))] +pub fn svrinta_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrinta_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Round using current rounding mode (inexact)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinti[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frinti))] +pub fn svrinti_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frinti.nxv4f32")] + fn _svrinti_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrinti_f32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Round using current rounding mode (inexact)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinti[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frinti))] +pub fn svrinti_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrinti_f32_m(op, pg, op) +} +#[doc = "Round using current rounding mode (inexact)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinti[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frinti))] +pub fn svrinti_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrinti_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Round using current rounding mode (inexact)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinti[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frinti))] +pub fn svrinti_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frinti.nxv2f64")] + fn _svrinti_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrinti_f64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Round using current rounding mode (inexact)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinti[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frinti))] +pub fn svrinti_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrinti_f64_m(op, pg, op) +} +#[doc = "Round using current rounding mode (inexact)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinti[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frinti))] +pub fn svrinti_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrinti_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Round towards -∞"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintm[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintm))] +pub fn svrintm_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintm.nxv4f32")] + fn _svrintm_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrintm_f32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Round towards -∞"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintm[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintm))] +pub fn svrintm_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrintm_f32_m(op, pg, op) +} +#[doc = "Round towards -∞"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintm[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintm))] +pub fn svrintm_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrintm_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Round towards -∞"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintm[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintm))] +pub fn svrintm_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintm.nxv2f64")] + fn _svrintm_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrintm_f64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Round towards -∞"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintm[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintm))] +pub fn svrintm_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrintm_f64_m(op, pg, op) +} +#[doc = "Round towards -∞"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintm[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintm))] +pub fn svrintm_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrintm_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Round to nearest, ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintn[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintn))] +pub fn svrintn_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintn.nxv4f32")] + fn _svrintn_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrintn_f32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Round to nearest, ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintn[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintn))] +pub fn svrintn_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrintn_f32_m(op, pg, op) +} +#[doc = "Round to nearest, ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintn[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintn))] +pub fn svrintn_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrintn_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Round to nearest, ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintn[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintn))] +pub fn svrintn_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintn.nxv2f64")] + fn _svrintn_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrintn_f64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Round to nearest, ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintn[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintn))] +pub fn svrintn_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrintn_f64_m(op, pg, op) +} +#[doc = "Round to nearest, ties to even"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintn[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintn))] +pub fn svrintn_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrintn_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Round towards +∞"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintp[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintp))] +pub fn svrintp_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintp.nxv4f32")] + fn _svrintp_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrintp_f32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Round towards +∞"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintp[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintp))] +pub fn svrintp_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrintp_f32_m(op, pg, op) +} +#[doc = "Round towards +∞"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintp[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintp))] +pub fn svrintp_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrintp_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Round towards +∞"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintp[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintp))] +pub fn svrintp_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintp.nxv2f64")] + fn _svrintp_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrintp_f64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Round towards +∞"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintp[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintp))] +pub fn svrintp_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrintp_f64_m(op, pg, op) +} +#[doc = "Round towards +∞"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintp[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintp))] +pub fn svrintp_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrintp_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Round using current rounding mode (exact)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintx[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintx))] +pub fn svrintx_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintx.nxv4f32")] + fn _svrintx_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrintx_f32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Round using current rounding mode (exact)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintx[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintx))] +pub fn svrintx_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrintx_f32_m(op, pg, op) +} +#[doc = "Round using current rounding mode (exact)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintx[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintx))] +pub fn svrintx_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrintx_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Round using current rounding mode (exact)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintx[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintx))] +pub fn svrintx_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintx.nxv2f64")] + fn _svrintx_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrintx_f64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Round using current rounding mode (exact)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintx[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintx))] +pub fn svrintx_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrintx_f64_m(op, pg, op) +} +#[doc = "Round using current rounding mode (exact)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintx[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintx))] +pub fn svrintx_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrintx_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Round towards zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintz[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintz))] +pub fn svrintz_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintz.nxv4f32")] + fn _svrintz_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrintz_f32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Round towards zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintz[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintz))] +pub fn svrintz_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrintz_f32_m(op, pg, op) +} +#[doc = "Round towards zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintz[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintz))] +pub fn svrintz_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svrintz_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Round towards zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintz[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintz))] +pub fn svrintz_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintz.nxv2f64")] + fn _svrintz_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrintz_f64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Round towards zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintz[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintz))] +pub fn svrintz_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrintz_f64_m(op, pg, op) +} +#[doc = "Round towards zero"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintz[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frintz))] +pub fn svrintz_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svrintz_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Reciprocal square root estimate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrte[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frsqrte))] +pub fn svrsqrte_f32(op: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.frsqrte.x.nxv4f32" + )] + fn _svrsqrte_f32(op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrsqrte_f32(op) } +} +#[doc = "Reciprocal square root estimate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrte[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frsqrte))] +pub fn svrsqrte_f64(op: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.frsqrte.x.nxv2f64" + )] + fn _svrsqrte_f64(op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrsqrte_f64(op) } +} +#[doc = "Reciprocal square root step"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrts[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frsqrts))] +pub fn svrsqrts_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.frsqrts.x.nxv4f32" + )] + fn _svrsqrts_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svrsqrts_f32(op1, op2) } +} +#[doc = "Reciprocal square root step"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrts[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(frsqrts))] +pub fn svrsqrts_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.frsqrts.x.nxv2f64" + )] + fn _svrsqrts_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svrsqrts_f64(op1, op2) } +} +#[doc = "Adjust exponent"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fscale))] +pub fn svscale_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svint32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fscale.nxv4f32")] + fn _svscale_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svint32_t) -> svfloat32_t; + } + unsafe { _svscale_f32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Adjust exponent"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_n_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fscale))] +pub fn svscale_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: i32) -> svfloat32_t { + svscale_f32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Adjust exponent"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fscale))] +pub fn svscale_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svint32_t) -> svfloat32_t { + svscale_f32_m(pg, op1, op2) +} +#[doc = "Adjust exponent"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_n_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fscale))] +pub fn svscale_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: i32) -> svfloat32_t { + svscale_f32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Adjust exponent"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fscale))] +pub fn svscale_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svint32_t) -> svfloat32_t { + svscale_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Adjust exponent"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_n_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fscale))] +pub fn svscale_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: i32) -> svfloat32_t { + svscale_f32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Adjust exponent"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fscale))] +pub fn svscale_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svint64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fscale.nxv2f64")] + fn _svscale_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svint64_t) -> svfloat64_t; + } + unsafe { _svscale_f64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Adjust exponent"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_n_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fscale))] +pub fn svscale_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: i64) -> svfloat64_t { + svscale_f64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Adjust exponent"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fscale))] +pub fn svscale_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svint64_t) -> svfloat64_t { + svscale_f64_m(pg, op1, op2) +} +#[doc = "Adjust exponent"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_n_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fscale))] +pub fn svscale_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: i64) -> svfloat64_t { + svscale_f64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Adjust exponent"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fscale))] +pub fn svscale_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svint64_t) -> svfloat64_t { + svscale_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Adjust exponent"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_n_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fscale))] +pub fn svscale_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: i64) -> svfloat64_t { + svscale_f64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Conditionally select elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_b])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_b(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe { simd_select(pg, op1, op2) } +} +#[doc = "Conditionally select elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe { simd_select::(pg.sve_into(), op1, op2) } +} +#[doc = "Conditionally select elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe { simd_select::(pg.sve_into(), op1, op2) } +} +#[doc = "Conditionally select elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe { simd_select::(pg, op1, op2) } +} +#[doc = "Conditionally select elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe { simd_select::(pg.sve_into(), op1, op2) } +} +#[doc = "Conditionally select elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe { simd_select::(pg.sve_into(), op1, op2) } +} +#[doc = "Conditionally select elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe { simd_select::(pg.sve_into(), op1, op2) } +} +#[doc = "Conditionally select elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { simd_select::(pg, op1, op2) } +} +#[doc = "Conditionally select elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { simd_select::(pg.sve_into(), op1, op2) } +} +#[doc = "Conditionally select elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { simd_select::(pg.sve_into(), op1, op2) } +} +#[doc = "Conditionally select elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sel))] +pub fn svsel_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { simd_select::(pg.sve_into(), op1, op2) } +} +#[doc = "Change one vector in a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset2_f32(tuple: svfloat32x2_t, x: svfloat32_t) -> svfloat32x2_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset2_f64(tuple: svfloat64x2_t, x: svfloat64_t) -> svfloat64x2_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset2_s8(tuple: svint8x2_t, x: svint8_t) -> svint8x2_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset2_s16(tuple: svint16x2_t, x: svint16_t) -> svint16x2_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset2_s32(tuple: svint32x2_t, x: svint32_t) -> svint32x2_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset2_s64(tuple: svint64x2_t, x: svint64_t) -> svint64x2_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset2_u8(tuple: svuint8x2_t, x: svuint8_t) -> svuint8x2_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset2_u16(tuple: svuint16x2_t, x: svuint16_t) -> svuint16x2_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset2_u32(tuple: svuint32x2_t, x: svuint32_t) -> svuint32x2_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset2_u64(tuple: svuint64x2_t, x: svuint64_t) -> svuint64x2_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset3_f32(tuple: svfloat32x3_t, x: svfloat32_t) -> svfloat32x3_t { + static_assert_range!(IMM_INDEX, 0..=2); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset3_f64(tuple: svfloat64x3_t, x: svfloat64_t) -> svfloat64x3_t { + static_assert_range!(IMM_INDEX, 0..=2); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset3_s8(tuple: svint8x3_t, x: svint8_t) -> svint8x3_t { + static_assert_range!(IMM_INDEX, 0..=2); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset3_s16(tuple: svint16x3_t, x: svint16_t) -> svint16x3_t { + static_assert_range!(IMM_INDEX, 0..=2); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset3_s32(tuple: svint32x3_t, x: svint32_t) -> svint32x3_t { + static_assert_range!(IMM_INDEX, 0..=2); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset3_s64(tuple: svint64x3_t, x: svint64_t) -> svint64x3_t { + static_assert_range!(IMM_INDEX, 0..=2); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset3_u8(tuple: svuint8x3_t, x: svuint8_t) -> svuint8x3_t { + static_assert_range!(IMM_INDEX, 0..=2); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset3_u16(tuple: svuint16x3_t, x: svuint16_t) -> svuint16x3_t { + static_assert_range!(IMM_INDEX, 0..=2); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset3_u32(tuple: svuint32x3_t, x: svuint32_t) -> svuint32x3_t { + static_assert_range!(IMM_INDEX, 0..=2); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset3_u64(tuple: svuint64x3_t, x: svuint64_t) -> svuint64x3_t { + static_assert_range!(IMM_INDEX, 0..=2); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset4_f32(tuple: svfloat32x4_t, x: svfloat32_t) -> svfloat32x4_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset4_f64(tuple: svfloat64x4_t, x: svfloat64_t) -> svfloat64x4_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset4_s8(tuple: svint8x4_t, x: svint8_t) -> svint8x4_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset4_s16(tuple: svint16x4_t, x: svint16_t) -> svint16x4_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset4_s32(tuple: svint32x4_t, x: svint32_t) -> svint32x4_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset4_s64(tuple: svint64x4_t, x: svint64_t) -> svint64x4_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset4_u8(tuple: svuint8x4_t, x: svuint8_t) -> svuint8x4_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset4_u16(tuple: svuint16x4_t, x: svuint16_t) -> svuint16x4_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset4_u32(tuple: svuint32x4_t, x: svuint32_t) -> svuint32x4_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Change one vector in a tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub fn svset4_u64(tuple: svuint64x4_t, x: svuint64_t) -> svuint64x4_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe { crate::intrinsics::simd::scalable::sve_tuple_set::<_, _, { IMM_INDEX }>(tuple, x) } +} +#[doc = "Initialize the first-fault register to all-true"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsetffr)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(setffr))] +pub fn svsetffr() { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.setffr")] + fn _svsetffr(); + } + unsafe { _svsetffr() } +} +#[doc = "Splice two vectors under predicate control"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(splice))] +pub fn svsplice_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.splice.nxv4f32")] + fn _svsplice_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svsplice_f32(pg.sve_into(), op1, op2) } +} +#[doc = "Splice two vectors under predicate control"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(splice))] +pub fn svsplice_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.splice.nxv2f64")] + fn _svsplice_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svsplice_f64(pg.sve_into(), op1, op2) } +} +#[doc = "Splice two vectors under predicate control"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(splice))] +pub fn svsplice_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.splice.nxv16i8")] + fn _svsplice_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svsplice_s8(pg, op1, op2) } +} +#[doc = "Splice two vectors under predicate control"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(splice))] +pub fn svsplice_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.splice.nxv8i16")] + fn _svsplice_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svsplice_s16(pg.sve_into(), op1, op2) } +} +#[doc = "Splice two vectors under predicate control"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(splice))] +pub fn svsplice_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.splice.nxv4i32")] + fn _svsplice_s32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svsplice_s32(pg.sve_into(), op1, op2) } +} +#[doc = "Splice two vectors under predicate control"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(splice))] +pub fn svsplice_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.splice.nxv2i64")] + fn _svsplice_s64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svsplice_s64(pg.sve_into(), op1, op2) } +} +#[doc = "Splice two vectors under predicate control"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(splice))] +pub fn svsplice_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svsplice_s8(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Splice two vectors under predicate control"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(splice))] +pub fn svsplice_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svsplice_s16(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Splice two vectors under predicate control"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(splice))] +pub fn svsplice_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svsplice_s32(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Splice two vectors under predicate control"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(splice))] +pub fn svsplice_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svsplice_s64(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Square root"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqrt[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsqrt))] +pub fn svsqrt_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsqrt.nxv4f32")] + fn _svsqrt_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t; + } + unsafe { _svsqrt_f32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Square root"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqrt[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsqrt))] +pub fn svsqrt_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svsqrt_f32_m(op, pg, op) +} +#[doc = "Square root"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqrt[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsqrt))] +pub fn svsqrt_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { + svsqrt_f32_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Square root"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqrt[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsqrt))] +pub fn svsqrt_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsqrt.nxv2f64")] + fn _svsqrt_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t; + } + unsafe { _svsqrt_f64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Square root"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqrt[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsqrt))] +pub fn svsqrt_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svsqrt_f64_m(op, pg, op) +} +#[doc = "Square root"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqrt[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsqrt))] +pub fn svsqrt_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { + svsqrt_f64_m(svdup_n_f64(0.0), pg, op) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_f32(pg: svbool_t, base: *mut f32, data: svfloat32_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv4f32")] + fn _svst1_f32(data: svfloat32_t, pg: svbool4_t, ptr: *mut f32); + } + _svst1_f32(data, pg.sve_into(), base) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_f64(pg: svbool_t, base: *mut f64, data: svfloat64_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv2f64")] + fn _svst1_f64(data: svfloat64_t, pg: svbool2_t, ptr: *mut f64); + } + _svst1_f64(data, pg.sve_into(), base) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1_s8(pg: svbool_t, base: *mut i8, data: svint8_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv16i8")] + fn _svst1_s8(data: svint8_t, pg: svbool_t, ptr: *mut i8); + } + _svst1_s8(data, pg, base) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1_s16(pg: svbool_t, base: *mut i16, data: svint16_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv8i16")] + fn _svst1_s16(data: svint16_t, pg: svbool8_t, ptr: *mut i16); + } + _svst1_s16(data, pg.sve_into(), base) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_s32(pg: svbool_t, base: *mut i32, data: svint32_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv4i32")] + fn _svst1_s32(data: svint32_t, pg: svbool4_t, ptr: *mut i32); + } + _svst1_s32(data, pg.sve_into(), base) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_s64(pg: svbool_t, base: *mut i64, data: svint64_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv2i64")] + fn _svst1_s64(data: svint64_t, pg: svbool2_t, ptr: *mut i64); + } + _svst1_s64(data, pg.sve_into(), base) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1_u8(pg: svbool_t, base: *mut u8, data: svuint8_t) { + svst1_s8(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1_u16(pg: svbool_t, base: *mut u16, data: svuint16_t) { + svst1_s16(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_u32(pg: svbool_t, base: *mut u32, data: svuint32_t) { + svst1_s32(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_u64(pg: svbool_t, base: *mut u64, data: svuint64_t) { + svst1_s64(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s32]index[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_s32index_f32( + pg: svbool_t, + base: *mut f32, + indices: svint32_t, + data: svfloat32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.sxtw.index.nxv4f32" + )] + fn _svst1_scatter_s32index_f32( + data: svfloat32_t, + pg: svbool4_t, + base: *mut f32, + indices: svint32_t, + ); + } + _svst1_scatter_s32index_f32(data, pg.sve_into(), base, indices) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s32]index[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_s32index_s32( + pg: svbool_t, + base: *mut i32, + indices: svint32_t, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.sxtw.index.nxv4i32" + )] + fn _svst1_scatter_s32index_s32( + data: svint32_t, + pg: svbool4_t, + base: *mut i32, + indices: svint32_t, + ); + } + _svst1_scatter_s32index_s32(data, pg.sve_into(), base, indices) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s32]index[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_s32index_u32( + pg: svbool_t, + base: *mut u32, + indices: svint32_t, + data: svuint32_t, +) { + svst1_scatter_s32index_s32(pg, base.as_signed(), indices, data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s64]index[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_s64index_f64( + pg: svbool_t, + base: *mut f64, + indices: svint64_t, + data: svfloat64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.index.nxv2f64" + )] + fn _svst1_scatter_s64index_f64( + data: svfloat64_t, + pg: svbool2_t, + base: *mut f64, + indices: svint64_t, + ); + } + _svst1_scatter_s64index_f64(data, pg.sve_into(), base, indices) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s64]index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_s64index_s64( + pg: svbool_t, + base: *mut i64, + indices: svint64_t, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.index.nxv2i64" + )] + fn _svst1_scatter_s64index_s64( + data: svint64_t, + pg: svbool2_t, + base: *mut i64, + indices: svint64_t, + ); + } + _svst1_scatter_s64index_s64(data, pg.sve_into(), base, indices) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s64]index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_s64index_u64( + pg: svbool_t, + base: *mut u64, + indices: svint64_t, + data: svuint64_t, +) { + svst1_scatter_s64index_s64(pg, base.as_signed(), indices, data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u32]index[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32index_f32( + pg: svbool_t, + base: *mut f32, + indices: svuint32_t, + data: svfloat32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.uxtw.index.nxv4f32" + )] + fn _svst1_scatter_u32index_f32( + data: svfloat32_t, + pg: svbool4_t, + base: *mut f32, + indices: svint32_t, + ); + } + _svst1_scatter_u32index_f32(data, pg.sve_into(), base, indices.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u32]index[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32index_s32( + pg: svbool_t, + base: *mut i32, + indices: svuint32_t, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.uxtw.index.nxv4i32" + )] + fn _svst1_scatter_u32index_s32( + data: svint32_t, + pg: svbool4_t, + base: *mut i32, + indices: svint32_t, + ); + } + _svst1_scatter_u32index_s32(data, pg.sve_into(), base, indices.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u32]index[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32index_u32( + pg: svbool_t, + base: *mut u32, + indices: svuint32_t, + data: svuint32_t, +) { + svst1_scatter_u32index_s32(pg, base.as_signed(), indices, data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u64]index[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64index_f64( + pg: svbool_t, + base: *mut f64, + indices: svuint64_t, + data: svfloat64_t, +) { + svst1_scatter_s64index_f64(pg, base, indices.as_signed(), data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u64]index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64index_s64( + pg: svbool_t, + base: *mut i64, + indices: svuint64_t, + data: svint64_t, +) { + svst1_scatter_s64index_s64(pg, base, indices.as_signed(), data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u64]index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64index_u64( + pg: svbool_t, + base: *mut u64, + indices: svuint64_t, + data: svuint64_t, +) { + svst1_scatter_s64index_s64(pg, base.as_signed(), indices.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s32]offset[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_s32offset_f32( + pg: svbool_t, + base: *mut f32, + offsets: svint32_t, + data: svfloat32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.sxtw.nxv4f32" + )] + fn _svst1_scatter_s32offset_f32( + data: svfloat32_t, + pg: svbool4_t, + base: *mut f32, + offsets: svint32_t, + ); + } + _svst1_scatter_s32offset_f32(data, pg.sve_into(), base, offsets) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s32]offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_s32offset_s32( + pg: svbool_t, + base: *mut i32, + offsets: svint32_t, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.sxtw.nxv4i32" + )] + fn _svst1_scatter_s32offset_s32( + data: svint32_t, + pg: svbool4_t, + base: *mut i32, + offsets: svint32_t, + ); + } + _svst1_scatter_s32offset_s32(data, pg.sve_into(), base, offsets) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s32]offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_s32offset_u32( + pg: svbool_t, + base: *mut u32, + offsets: svint32_t, + data: svuint32_t, +) { + svst1_scatter_s32offset_s32(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s64]offset[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_s64offset_f64( + pg: svbool_t, + base: *mut f64, + offsets: svint64_t, + data: svfloat64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.nxv2f64" + )] + fn _svst1_scatter_s64offset_f64( + data: svfloat64_t, + pg: svbool2_t, + base: *mut f64, + offsets: svint64_t, + ); + } + _svst1_scatter_s64offset_f64(data, pg.sve_into(), base, offsets) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_s64offset_s64( + pg: svbool_t, + base: *mut i64, + offsets: svint64_t, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.nxv2i64" + )] + fn _svst1_scatter_s64offset_s64( + data: svint64_t, + pg: svbool2_t, + base: *mut i64, + offsets: svint64_t, + ); + } + _svst1_scatter_s64offset_s64(data, pg.sve_into(), base, offsets) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_s64offset_u64( + pg: svbool_t, + base: *mut u64, + offsets: svint64_t, + data: svuint64_t, +) { + svst1_scatter_s64offset_s64(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u32]offset[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32offset_f32( + pg: svbool_t, + base: *mut f32, + offsets: svuint32_t, + data: svfloat32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.uxtw.nxv4f32" + )] + fn _svst1_scatter_u32offset_f32( + data: svfloat32_t, + pg: svbool4_t, + base: *mut f32, + offsets: svint32_t, + ); + } + _svst1_scatter_u32offset_f32(data, pg.sve_into(), base, offsets.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u32]offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32offset_s32( + pg: svbool_t, + base: *mut i32, + offsets: svuint32_t, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.uxtw.nxv4i32" + )] + fn _svst1_scatter_u32offset_s32( + data: svint32_t, + pg: svbool4_t, + base: *mut i32, + offsets: svint32_t, + ); + } + _svst1_scatter_u32offset_s32(data, pg.sve_into(), base, offsets.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u32]offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32offset_u32( + pg: svbool_t, + base: *mut u32, + offsets: svuint32_t, + data: svuint32_t, +) { + svst1_scatter_u32offset_s32(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u64]offset[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64offset_f64( + pg: svbool_t, + base: *mut f64, + offsets: svuint64_t, + data: svfloat64_t, +) { + svst1_scatter_s64offset_f64(pg, base, offsets.as_signed(), data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64offset_s64( + pg: svbool_t, + base: *mut i64, + offsets: svuint64_t, + data: svint64_t, +) { + svst1_scatter_s64offset_s64(pg, base, offsets.as_signed(), data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64offset_u64( + pg: svbool_t, + base: *mut u64, + offsets: svuint64_t, + data: svuint64_t, +) { + svst1_scatter_s64offset_s64(pg, base.as_signed(), offsets.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32base_f32(pg: svbool_t, bases: svuint32_t, data: svfloat32_t) { + svst1_scatter_u32base_offset_f32(pg, bases, 0, data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32base_s32(pg: svbool_t, bases: svuint32_t, data: svint32_t) { + svst1_scatter_u32base_offset_s32(pg, bases, 0, data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32base_u32(pg: svbool_t, bases: svuint32_t, data: svuint32_t) { + svst1_scatter_u32base_offset_u32(pg, bases, 0, data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64base_f64(pg: svbool_t, bases: svuint64_t, data: svfloat64_t) { + svst1_scatter_u64base_offset_f64(pg, bases, 0, data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: svint64_t) { + svst1_scatter_u64base_offset_s64(pg, bases, 0, data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: svuint64_t) { + svst1_scatter_u64base_offset_u64(pg, bases, 0, data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base]_index[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32base_index_f32( + pg: svbool_t, + bases: svuint32_t, + index: i64, + data: svfloat32_t, +) { + svst1_scatter_u32base_offset_f32(pg, bases, index.unchecked_shl(2), data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base]_index[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, + data: svint32_t, +) { + svst1_scatter_u32base_offset_s32(pg, bases, index.unchecked_shl(2), data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base]_index[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, + data: svuint32_t, +) { + svst1_scatter_u32base_offset_u32(pg, bases, index.unchecked_shl(2), data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base]_index[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64base_index_f64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svfloat64_t, +) { + svst1_scatter_u64base_offset_f64(pg, bases, index.unchecked_shl(3), data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base]_index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svint64_t, +) { + svst1_scatter_u64base_offset_s64(pg, bases, index.unchecked_shl(3), data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base]_index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svuint64_t, +) { + svst1_scatter_u64base_offset_u64(pg, bases, index.unchecked_shl(3), data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base]_offset[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32base_offset_f32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svfloat32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4f32.nxv4i32" + )] + fn _svst1_scatter_u32base_offset_f32( + data: svfloat32_t, + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ); + } + _svst1_scatter_u32base_offset_f32(data, pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base]_offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i32.nxv4i32" + )] + fn _svst1_scatter_u32base_offset_s32( + data: svint32_t, + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ); + } + _svst1_scatter_u32base_offset_s32(data, pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base]_offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_scatter_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svuint32_t, +) { + svst1_scatter_u32base_offset_s32(pg, bases, offset, data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base]_offset[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64base_offset_f64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svfloat64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2f64.nxv2i64" + )] + fn _svst1_scatter_u64base_offset_f64( + data: svfloat64_t, + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ); + } + _svst1_scatter_u64base_offset_f64(data, pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base]_offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i64.nxv2i64" + )] + fn _svst1_scatter_u64base_offset_s64( + data: svint64_t, + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ); + } + _svst1_scatter_u64base_offset_s64(data, pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base]_offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_scatter_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svuint64_t, +) { + svst1_scatter_u64base_offset_s64(pg, bases, offset, data.as_signed()) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_vnum_f32(pg: svbool_t, base: *mut f32, vnum: i64, data: svfloat32_t) { + svst1_f32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_vnum_f64(pg: svbool_t, base: *mut f64, vnum: i64, data: svfloat64_t) { + svst1_f64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1_vnum_s8(pg: svbool_t, base: *mut i8, vnum: i64, data: svint8_t) { + svst1_s8(pg, base.offset(svcntb() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1_vnum_s16(pg: svbool_t, base: *mut i16, vnum: i64, data: svint16_t) { + svst1_s16(pg, base.offset(svcnth() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_vnum_s32(pg: svbool_t, base: *mut i32, vnum: i64, data: svint32_t) { + svst1_s32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_vnum_s64(pg: svbool_t, base: *mut i64, vnum: i64, data: svint64_t) { + svst1_s64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1_vnum_u8(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint8_t) { + svst1_u8(pg, base.offset(svcntb() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1_vnum_u16(pg: svbool_t, base: *mut u16, vnum: i64, data: svuint16_t) { + svst1_u16(pg, base.offset(svcnth() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1_vnum_u32(pg: svbool_t, base: *mut u32, vnum: i64, data: svuint32_t) { + svst1_u32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1d))] +pub unsafe fn svst1_vnum_u64(pg: svbool_t, base: *mut u64, vnum: i64, data: svuint64_t) { + svst1_u64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_s16(pg: svbool_t, base: *mut i8, data: svint16_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv8i8")] + fn _svst1b_s16(data: nxv8i8, pg: svbool8_t, ptr: *mut i8); + } + _svst1b_s16( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + base, + ) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_s32(pg: svbool_t, base: *mut i8, data: svint32_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv4i8")] + fn _svst1b_s32(data: nxv4i8, pg: svbool4_t, ptr: *mut i8); + } + _svst1b_s32( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + base, + ) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_s32(pg: svbool_t, base: *mut i16, data: svint32_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv4i16")] + fn _svst1h_s32(data: nxv4i16, pg: svbool4_t, ptr: *mut i16); + } + _svst1h_s32( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + base, + ) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_s64(pg: svbool_t, base: *mut i8, data: svint64_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv2i8")] + fn _svst1b_s64(data: nxv2i8, pg: svbool2_t, ptr: *mut i8); + } + _svst1b_s64( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + base, + ) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_s64(pg: svbool_t, base: *mut i16, data: svint64_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv2i16")] + fn _svst1h_s64(data: nxv2i16, pg: svbool2_t, ptr: *mut i16); + } + _svst1h_s64( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + base, + ) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_s64(pg: svbool_t, base: *mut i32, data: svint64_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv2i32")] + fn _svst1w_s64(data: nxv2i32, pg: svbool2_t, ptr: *mut i32); + } + _svst1w_s64( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + base, + ) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_u16(pg: svbool_t, base: *mut u8, data: svuint16_t) { + svst1b_s16(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_u32(pg: svbool_t, base: *mut u8, data: svuint32_t) { + svst1b_s32(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_u32(pg: svbool_t, base: *mut u16, data: svuint32_t) { + svst1h_s32(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_u64(pg: svbool_t, base: *mut u8, data: svuint64_t) { + svst1b_s64(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_u64(pg: svbool_t, base: *mut u16, data: svuint64_t) { + svst1h_s64(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_u64(pg: svbool_t, base: *mut u32, data: svuint64_t) { + svst1w_s64(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[s32]offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_s32offset_s32( + pg: svbool_t, + base: *mut i8, + offsets: svint32_t, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.sxtw.nxv4i8" + )] + fn _svst1b_scatter_s32offset_s32( + data: nxv4i8, + pg: svbool4_t, + base: *mut i8, + offsets: svint32_t, + ); + } + _svst1b_scatter_s32offset_s32( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + base, + offsets, + ) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s32]offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_s32offset_s32( + pg: svbool_t, + base: *mut i16, + offsets: svint32_t, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.sxtw.nxv4i16" + )] + fn _svst1h_scatter_s32offset_s32( + data: nxv4i16, + pg: svbool4_t, + base: *mut i16, + offsets: svint32_t, + ); + } + _svst1h_scatter_s32offset_s32( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + base, + offsets, + ) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[s32]offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_s32offset_u32( + pg: svbool_t, + base: *mut u8, + offsets: svint32_t, + data: svuint32_t, +) { + svst1b_scatter_s32offset_s32(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s32]offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_s32offset_u32( + pg: svbool_t, + base: *mut u16, + offsets: svint32_t, + data: svuint32_t, +) { + svst1h_scatter_s32offset_s32(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[s64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_s64offset_s64( + pg: svbool_t, + base: *mut i8, + offsets: svint64_t, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.nxv2i8" + )] + fn _svst1b_scatter_s64offset_s64( + data: nxv2i8, + pg: svbool2_t, + base: *mut i8, + offsets: svint64_t, + ); + } + _svst1b_scatter_s64offset_s64( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + base, + offsets, + ) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_s64offset_s64( + pg: svbool_t, + base: *mut i16, + offsets: svint64_t, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.nxv2i16" + )] + fn _svst1h_scatter_s64offset_s64( + data: nxv2i16, + pg: svbool2_t, + base: *mut i16, + offsets: svint64_t, + ); + } + _svst1h_scatter_s64offset_s64( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + base, + offsets, + ) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[s64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_s64offset_s64( + pg: svbool_t, + base: *mut i32, + offsets: svint64_t, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.nxv2i32" + )] + fn _svst1w_scatter_s64offset_s64( + data: nxv2i32, + pg: svbool2_t, + base: *mut i32, + offsets: svint64_t, + ); + } + _svst1w_scatter_s64offset_s64( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + base, + offsets, + ) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[s64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_s64offset_u64( + pg: svbool_t, + base: *mut u8, + offsets: svint64_t, + data: svuint64_t, +) { + svst1b_scatter_s64offset_s64(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_s64offset_u64( + pg: svbool_t, + base: *mut u16, + offsets: svint64_t, + data: svuint64_t, +) { + svst1h_scatter_s64offset_s64(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[s64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_s64offset_u64( + pg: svbool_t, + base: *mut u32, + offsets: svint64_t, + data: svuint64_t, +) { + svst1w_scatter_s64offset_s64(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[u32]offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u32offset_s32( + pg: svbool_t, + base: *mut i8, + offsets: svuint32_t, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.uxtw.nxv4i8" + )] + fn _svst1b_scatter_u32offset_s32( + data: nxv4i8, + pg: svbool4_t, + base: *mut i8, + offsets: svint32_t, + ); + } + _svst1b_scatter_u32offset_s32( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + base, + offsets.as_signed(), + ) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u32]offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u32offset_s32( + pg: svbool_t, + base: *mut i16, + offsets: svuint32_t, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.uxtw.nxv4i16" + )] + fn _svst1h_scatter_u32offset_s32( + data: nxv4i16, + pg: svbool4_t, + base: *mut i16, + offsets: svint32_t, + ); + } + _svst1h_scatter_u32offset_s32( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + base, + offsets.as_signed(), + ) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[u32]offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u32offset_u32( + pg: svbool_t, + base: *mut u8, + offsets: svuint32_t, + data: svuint32_t, +) { + svst1b_scatter_u32offset_s32(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u32]offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u32offset_u32( + pg: svbool_t, + base: *mut u16, + offsets: svuint32_t, + data: svuint32_t, +) { + svst1h_scatter_u32offset_s32(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[u64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u64offset_s64( + pg: svbool_t, + base: *mut i8, + offsets: svuint64_t, + data: svint64_t, +) { + svst1b_scatter_s64offset_s64(pg, base, offsets.as_signed(), data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u64offset_s64( + pg: svbool_t, + base: *mut i16, + offsets: svuint64_t, + data: svint64_t, +) { + svst1h_scatter_s64offset_s64(pg, base, offsets.as_signed(), data) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[u64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_u64offset_s64( + pg: svbool_t, + base: *mut i32, + offsets: svuint64_t, + data: svint64_t, +) { + svst1w_scatter_s64offset_s64(pg, base, offsets.as_signed(), data) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[u64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u64offset_u64( + pg: svbool_t, + base: *mut u8, + offsets: svuint64_t, + data: svuint64_t, +) { + svst1b_scatter_s64offset_s64(pg, base.as_signed(), offsets.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u64offset_u64( + pg: svbool_t, + base: *mut u16, + offsets: svuint64_t, + data: svuint64_t, +) { + svst1h_scatter_s64offset_s64(pg, base.as_signed(), offsets.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[u64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_u64offset_u64( + pg: svbool_t, + base: *mut u32, + offsets: svuint64_t, + data: svuint64_t, +) { + svst1w_scatter_s64offset_s64(pg, base.as_signed(), offsets.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u32base]_offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i8.nxv4i32" + )] + fn _svst1b_scatter_u32base_offset_s32( + data: nxv4i8, + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ); + } + _svst1b_scatter_u32base_offset_s32( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + bases.as_signed(), + offset, + ) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u32base]_offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i16.nxv4i32" + )] + fn _svst1h_scatter_u32base_offset_s32( + data: nxv4i16, + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ); + } + _svst1h_scatter_u32base_offset_s32( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + bases.as_signed(), + offset, + ) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u32base]_offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svuint32_t, +) { + svst1b_scatter_u32base_offset_s32(pg, bases, offset, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u32base]_offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svuint32_t, +) { + svst1h_scatter_u32base_offset_s32(pg, bases, offset, data.as_signed()) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u64base]_offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i8.nxv2i64" + )] + fn _svst1b_scatter_u64base_offset_s64( + data: nxv2i8, + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ); + } + _svst1b_scatter_u64base_offset_s64( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + bases.as_signed(), + offset, + ) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u64base]_offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i16.nxv2i64" + )] + fn _svst1h_scatter_u64base_offset_s64( + data: nxv2i16, + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ); + } + _svst1h_scatter_u64base_offset_s64( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + bases.as_signed(), + offset, + ) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter[_u64base]_offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i32.nxv2i64" + )] + fn _svst1w_scatter_u64base_offset_s64( + data: nxv2i32, + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ); + } + _svst1w_scatter_u64base_offset_s64( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + bases.as_signed(), + offset, + ) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u64base]_offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svuint64_t, +) { + svst1b_scatter_u64base_offset_s64(pg, bases, offset, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u64base]_offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svuint64_t, +) { + svst1h_scatter_u64base_offset_s64(pg, bases, offset, data.as_signed()) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter[_u64base]_offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svuint64_t, +) { + svst1w_scatter_u64base_offset_s64(pg, bases, offset, data.as_signed()) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u32base_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u32base_s32(pg: svbool_t, bases: svuint32_t, data: svint32_t) { + svst1b_scatter_u32base_offset_s32(pg, bases, 0, data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u32base_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u32base_s32(pg: svbool_t, bases: svuint32_t, data: svint32_t) { + svst1h_scatter_u32base_offset_s32(pg, bases, 0, data) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u32base_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u32base_u32(pg: svbool_t, bases: svuint32_t, data: svuint32_t) { + svst1b_scatter_u32base_offset_u32(pg, bases, 0, data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u32base_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u32base_u32(pg: svbool_t, bases: svuint32_t, data: svuint32_t) { + svst1h_scatter_u32base_offset_u32(pg, bases, 0, data) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u64base_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: svint64_t) { + svst1b_scatter_u64base_offset_s64(pg, bases, 0, data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u64base_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: svint64_t) { + svst1h_scatter_u64base_offset_s64(pg, bases, 0, data) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter[_u64base_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: svint64_t) { + svst1w_scatter_u64base_offset_s64(pg, bases, 0, data) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u64base_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: svuint64_t) { + svst1b_scatter_u64base_offset_u64(pg, bases, 0, data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u64base_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: svuint64_t) { + svst1h_scatter_u64base_offset_u64(pg, bases, 0, data) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter[_u64base_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: svuint64_t) { + svst1w_scatter_u64base_offset_u64(pg, bases, 0, data) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_vnum[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_vnum_s16(pg: svbool_t, base: *mut i8, vnum: i64, data: svint16_t) { + svst1b_s16(pg, base.offset(svcnth() as isize * vnum as isize), data) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_vnum[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_vnum_s32(pg: svbool_t, base: *mut i8, vnum: i64, data: svint32_t) { + svst1b_s32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_vnum[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_vnum_s32(pg: svbool_t, base: *mut i16, vnum: i64, data: svint32_t) { + svst1h_s32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_vnum[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_vnum_s64(pg: svbool_t, base: *mut i8, vnum: i64, data: svint64_t) { + svst1b_s64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_vnum[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_vnum_s64(pg: svbool_t, base: *mut i16, vnum: i64, data: svint64_t) { + svst1h_s64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_vnum[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_vnum_s64(pg: svbool_t, base: *mut i32, vnum: i64, data: svint64_t) { + svst1w_s64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_vnum[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_vnum_u16(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint16_t) { + svst1b_u16(pg, base.offset(svcnth() as isize * vnum as isize), data) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_vnum[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_vnum_u32(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint32_t) { + svst1b_u32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_vnum[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_vnum_u32(pg: svbool_t, base: *mut u16, vnum: i64, data: svuint32_t) { + svst1h_u32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Truncate to 8 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_vnum[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1b))] +pub unsafe fn svst1b_vnum_u64(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint64_t) { + svst1b_u64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_vnum[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_vnum_u64(pg: svbool_t, base: *mut u16, vnum: i64, data: svuint64_t) { + svst1h_u64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_vnum[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_vnum_u64(pg: svbool_t, base: *mut u32, vnum: i64, data: svuint64_t) { + svst1w_u64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s32]index[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_s32index_s32( + pg: svbool_t, + base: *mut i16, + indices: svint32_t, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.sxtw.index.nxv4i16" + )] + fn _svst1h_scatter_s32index_s32( + data: nxv4i16, + pg: svbool4_t, + base: *mut i16, + indices: svint32_t, + ); + } + _svst1h_scatter_s32index_s32( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + base, + indices, + ) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s32]index[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_s32index_u32( + pg: svbool_t, + base: *mut u16, + indices: svint32_t, + data: svuint32_t, +) { + svst1h_scatter_s32index_s32(pg, base.as_signed(), indices, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s64]index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_s64index_s64( + pg: svbool_t, + base: *mut i16, + indices: svint64_t, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.index.nxv2i16" + )] + fn _svst1h_scatter_s64index_s64( + data: nxv2i16, + pg: svbool2_t, + base: *mut i16, + indices: svint64_t, + ); + } + _svst1h_scatter_s64index_s64( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + base, + indices, + ) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[s64]index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_s64index_s64( + pg: svbool_t, + base: *mut i32, + indices: svint64_t, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.index.nxv2i32" + )] + fn _svst1w_scatter_s64index_s64( + data: nxv2i32, + pg: svbool2_t, + base: *mut i32, + indices: svint64_t, + ); + } + _svst1w_scatter_s64index_s64( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + base, + indices, + ) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s64]index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_s64index_u64( + pg: svbool_t, + base: *mut u16, + indices: svint64_t, + data: svuint64_t, +) { + svst1h_scatter_s64index_s64(pg, base.as_signed(), indices, data.as_signed()) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[s64]index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_s64index_u64( + pg: svbool_t, + base: *mut u32, + indices: svint64_t, + data: svuint64_t, +) { + svst1w_scatter_s64index_s64(pg, base.as_signed(), indices, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u32]index[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u32index_s32( + pg: svbool_t, + base: *mut i16, + indices: svuint32_t, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.st1.scatter.uxtw.index.nxv4i16" + )] + fn _svst1h_scatter_u32index_s32( + data: nxv4i16, + pg: svbool4_t, + base: *mut i16, + indices: svint32_t, + ); + } + _svst1h_scatter_u32index_s32( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + base, + indices.as_signed(), + ) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u32]index[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u32index_u32( + pg: svbool_t, + base: *mut u16, + indices: svuint32_t, + data: svuint32_t, +) { + svst1h_scatter_u32index_s32(pg, base.as_signed(), indices, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u64]index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u64index_s64( + pg: svbool_t, + base: *mut i16, + indices: svuint64_t, + data: svint64_t, +) { + svst1h_scatter_s64index_s64(pg, base, indices.as_signed(), data) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[u64]index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_u64index_s64( + pg: svbool_t, + base: *mut i32, + indices: svuint64_t, + data: svint64_t, +) { + svst1w_scatter_s64index_s64(pg, base, indices.as_signed(), data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u64]index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u64index_u64( + pg: svbool_t, + base: *mut u16, + indices: svuint64_t, + data: svuint64_t, +) { + svst1h_scatter_s64index_s64(pg, base.as_signed(), indices.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[u64]index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_u64index_u64( + pg: svbool_t, + base: *mut u32, + indices: svuint64_t, + data: svuint64_t, +) { + svst1w_scatter_s64index_s64(pg, base.as_signed(), indices.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u32base]_index[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, + data: svint32_t, +) { + svst1h_scatter_u32base_offset_s32(pg, bases, index.unchecked_shl(1), data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u32base]_index[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, + data: svuint32_t, +) { + svst1h_scatter_u32base_offset_u32(pg, bases, index.unchecked_shl(1), data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u64base]_index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svint64_t, +) { + svst1h_scatter_u64base_offset_s64(pg, bases, index.unchecked_shl(1), data) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter[_u64base]_index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svint64_t, +) { + svst1w_scatter_u64base_offset_s64(pg, bases, index.unchecked_shl(2), data) +} +#[doc = "Truncate to 16 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u64base]_index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1h))] +pub unsafe fn svst1h_scatter_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svuint64_t, +) { + svst1h_scatter_u64base_offset_u64(pg, bases, index.unchecked_shl(1), data) +} +#[doc = "Truncate to 32 bits and store"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter[_u64base]_index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st1w))] +pub unsafe fn svst1w_scatter_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svuint64_t, +) { + svst1w_scatter_u64base_offset_u64(pg, bases, index.unchecked_shl(2), data) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2w))] +pub unsafe fn svst2_f32(pg: svbool_t, base: *mut f32, data: svfloat32x2_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st2.nxv4f32")] + fn _svst2_f32(data0: svfloat32_t, data1: svfloat32_t, pg: svbool4_t, ptr: *mut f32); + } + _svst2_f32( + svget2_f32::<0>(data), + svget2_f32::<1>(data), + pg.sve_into(), + base, + ) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2d))] +pub unsafe fn svst2_f64(pg: svbool_t, base: *mut f64, data: svfloat64x2_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st2.nxv2f64")] + fn _svst2_f64(data0: svfloat64_t, data1: svfloat64_t, pg: svbool2_t, ptr: *mut f64); + } + _svst2_f64( + svget2_f64::<0>(data), + svget2_f64::<1>(data), + pg.sve_into(), + base, + ) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2b))] +pub unsafe fn svst2_s8(pg: svbool_t, base: *mut i8, data: svint8x2_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st2.nxv16i8")] + fn _svst2_s8(data0: svint8_t, data1: svint8_t, pg: svbool_t, ptr: *mut i8); + } + _svst2_s8(svget2_s8::<0>(data), svget2_s8::<1>(data), pg, base) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2h))] +pub unsafe fn svst2_s16(pg: svbool_t, base: *mut i16, data: svint16x2_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st2.nxv8i16")] + fn _svst2_s16(data0: svint16_t, data1: svint16_t, pg: svbool8_t, ptr: *mut i16); + } + _svst2_s16( + svget2_s16::<0>(data), + svget2_s16::<1>(data), + pg.sve_into(), + base, + ) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2w))] +pub unsafe fn svst2_s32(pg: svbool_t, base: *mut i32, data: svint32x2_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st2.nxv4i32")] + fn _svst2_s32(data0: svint32_t, data1: svint32_t, pg: svbool4_t, ptr: *mut i32); + } + _svst2_s32( + svget2_s32::<0>(data), + svget2_s32::<1>(data), + pg.sve_into(), + base, + ) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2d))] +pub unsafe fn svst2_s64(pg: svbool_t, base: *mut i64, data: svint64x2_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st2.nxv2i64")] + fn _svst2_s64(data0: svint64_t, data1: svint64_t, pg: svbool2_t, ptr: *mut i64); + } + _svst2_s64( + svget2_s64::<0>(data), + svget2_s64::<1>(data), + pg.sve_into(), + base, + ) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2b))] +pub unsafe fn svst2_u8(pg: svbool_t, base: *mut u8, data: svuint8x2_t) { + svst2_s8(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2h))] +pub unsafe fn svst2_u16(pg: svbool_t, base: *mut u16, data: svuint16x2_t) { + svst2_s16(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2w))] +pub unsafe fn svst2_u32(pg: svbool_t, base: *mut u32, data: svuint32x2_t) { + svst2_s32(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2d))] +pub unsafe fn svst2_u64(pg: svbool_t, base: *mut u64, data: svuint64x2_t) { + svst2_s64(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2w))] +pub unsafe fn svst2_vnum_f32(pg: svbool_t, base: *mut f32, vnum: i64, data: svfloat32x2_t) { + svst2_f32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2d))] +pub unsafe fn svst2_vnum_f64(pg: svbool_t, base: *mut f64, vnum: i64, data: svfloat64x2_t) { + svst2_f64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2b))] +pub unsafe fn svst2_vnum_s8(pg: svbool_t, base: *mut i8, vnum: i64, data: svint8x2_t) { + svst2_s8(pg, base.offset(svcntb() as isize * vnum as isize), data) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2h))] +pub unsafe fn svst2_vnum_s16(pg: svbool_t, base: *mut i16, vnum: i64, data: svint16x2_t) { + svst2_s16(pg, base.offset(svcnth() as isize * vnum as isize), data) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2w))] +pub unsafe fn svst2_vnum_s32(pg: svbool_t, base: *mut i32, vnum: i64, data: svint32x2_t) { + svst2_s32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2d))] +pub unsafe fn svst2_vnum_s64(pg: svbool_t, base: *mut i64, vnum: i64, data: svint64x2_t) { + svst2_s64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2b))] +pub unsafe fn svst2_vnum_u8(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint8x2_t) { + svst2_u8(pg, base.offset(svcntb() as isize * vnum as isize), data) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2h))] +pub unsafe fn svst2_vnum_u16(pg: svbool_t, base: *mut u16, vnum: i64, data: svuint16x2_t) { + svst2_u16(pg, base.offset(svcnth() as isize * vnum as isize), data) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2w))] +pub unsafe fn svst2_vnum_u32(pg: svbool_t, base: *mut u32, vnum: i64, data: svuint32x2_t) { + svst2_u32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Store two vectors into two-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st2d))] +pub unsafe fn svst2_vnum_u64(pg: svbool_t, base: *mut u64, vnum: i64, data: svuint64x2_t) { + svst2_u64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3w))] +pub unsafe fn svst3_f32(pg: svbool_t, base: *mut f32, data: svfloat32x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st3.nxv4f32")] + fn _svst3_f32( + data0: svfloat32_t, + data1: svfloat32_t, + data2: svfloat32_t, + pg: svbool4_t, + ptr: *mut f32, + ); + } + _svst3_f32( + svget3_f32::<0>(data), + svget3_f32::<1>(data), + svget3_f32::<2>(data), + pg.sve_into(), + base, + ) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3d))] +pub unsafe fn svst3_f64(pg: svbool_t, base: *mut f64, data: svfloat64x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st3.nxv2f64")] + fn _svst3_f64( + data0: svfloat64_t, + data1: svfloat64_t, + data2: svfloat64_t, + pg: svbool2_t, + ptr: *mut f64, + ); + } + _svst3_f64( + svget3_f64::<0>(data), + svget3_f64::<1>(data), + svget3_f64::<2>(data), + pg.sve_into(), + base, + ) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3b))] +pub unsafe fn svst3_s8(pg: svbool_t, base: *mut i8, data: svint8x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st3.nxv16i8")] + fn _svst3_s8(data0: svint8_t, data1: svint8_t, data2: svint8_t, pg: svbool_t, ptr: *mut i8); + } + _svst3_s8( + svget3_s8::<0>(data), + svget3_s8::<1>(data), + svget3_s8::<2>(data), + pg, + base, + ) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3h))] +pub unsafe fn svst3_s16(pg: svbool_t, base: *mut i16, data: svint16x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st3.nxv8i16")] + fn _svst3_s16( + data0: svint16_t, + data1: svint16_t, + data2: svint16_t, + pg: svbool8_t, + ptr: *mut i16, + ); + } + _svst3_s16( + svget3_s16::<0>(data), + svget3_s16::<1>(data), + svget3_s16::<2>(data), + pg.sve_into(), + base, + ) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3w))] +pub unsafe fn svst3_s32(pg: svbool_t, base: *mut i32, data: svint32x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st3.nxv4i32")] + fn _svst3_s32( + data0: svint32_t, + data1: svint32_t, + data2: svint32_t, + pg: svbool4_t, + ptr: *mut i32, + ); + } + _svst3_s32( + svget3_s32::<0>(data), + svget3_s32::<1>(data), + svget3_s32::<2>(data), + pg.sve_into(), + base, + ) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3d))] +pub unsafe fn svst3_s64(pg: svbool_t, base: *mut i64, data: svint64x3_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st3.nxv2i64")] + fn _svst3_s64( + data0: svint64_t, + data1: svint64_t, + data2: svint64_t, + pg: svbool2_t, + ptr: *mut i64, + ); + } + _svst3_s64( + svget3_s64::<0>(data), + svget3_s64::<1>(data), + svget3_s64::<2>(data), + pg.sve_into(), + base, + ) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3b))] +pub unsafe fn svst3_u8(pg: svbool_t, base: *mut u8, data: svuint8x3_t) { + svst3_s8(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3h))] +pub unsafe fn svst3_u16(pg: svbool_t, base: *mut u16, data: svuint16x3_t) { + svst3_s16(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3w))] +pub unsafe fn svst3_u32(pg: svbool_t, base: *mut u32, data: svuint32x3_t) { + svst3_s32(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3d))] +pub unsafe fn svst3_u64(pg: svbool_t, base: *mut u64, data: svuint64x3_t) { + svst3_s64(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3w))] +pub unsafe fn svst3_vnum_f32(pg: svbool_t, base: *mut f32, vnum: i64, data: svfloat32x3_t) { + svst3_f32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3d))] +pub unsafe fn svst3_vnum_f64(pg: svbool_t, base: *mut f64, vnum: i64, data: svfloat64x3_t) { + svst3_f64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3b))] +pub unsafe fn svst3_vnum_s8(pg: svbool_t, base: *mut i8, vnum: i64, data: svint8x3_t) { + svst3_s8(pg, base.offset(svcntb() as isize * vnum as isize), data) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3h))] +pub unsafe fn svst3_vnum_s16(pg: svbool_t, base: *mut i16, vnum: i64, data: svint16x3_t) { + svst3_s16(pg, base.offset(svcnth() as isize * vnum as isize), data) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3w))] +pub unsafe fn svst3_vnum_s32(pg: svbool_t, base: *mut i32, vnum: i64, data: svint32x3_t) { + svst3_s32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3d))] +pub unsafe fn svst3_vnum_s64(pg: svbool_t, base: *mut i64, vnum: i64, data: svint64x3_t) { + svst3_s64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3b))] +pub unsafe fn svst3_vnum_u8(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint8x3_t) { + svst3_u8(pg, base.offset(svcntb() as isize * vnum as isize), data) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3h))] +pub unsafe fn svst3_vnum_u16(pg: svbool_t, base: *mut u16, vnum: i64, data: svuint16x3_t) { + svst3_u16(pg, base.offset(svcnth() as isize * vnum as isize), data) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3w))] +pub unsafe fn svst3_vnum_u32(pg: svbool_t, base: *mut u32, vnum: i64, data: svuint32x3_t) { + svst3_u32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Store three vectors into three-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st3d))] +pub unsafe fn svst3_vnum_u64(pg: svbool_t, base: *mut u64, vnum: i64, data: svuint64x3_t) { + svst3_u64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4w))] +pub unsafe fn svst4_f32(pg: svbool_t, base: *mut f32, data: svfloat32x4_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st4.nxv4f32")] + fn _svst4_f32( + data0: svfloat32_t, + data1: svfloat32_t, + data2: svfloat32_t, + data3: svfloat32_t, + pg: svbool4_t, + ptr: *mut f32, + ); + } + _svst4_f32( + svget4_f32::<0>(data), + svget4_f32::<1>(data), + svget4_f32::<2>(data), + svget4_f32::<3>(data), + pg.sve_into(), + base, + ) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4d))] +pub unsafe fn svst4_f64(pg: svbool_t, base: *mut f64, data: svfloat64x4_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st4.nxv2f64")] + fn _svst4_f64( + data0: svfloat64_t, + data1: svfloat64_t, + data2: svfloat64_t, + data3: svfloat64_t, + pg: svbool2_t, + ptr: *mut f64, + ); + } + _svst4_f64( + svget4_f64::<0>(data), + svget4_f64::<1>(data), + svget4_f64::<2>(data), + svget4_f64::<3>(data), + pg.sve_into(), + base, + ) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4b))] +pub unsafe fn svst4_s8(pg: svbool_t, base: *mut i8, data: svint8x4_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st4.nxv16i8")] + fn _svst4_s8( + data0: svint8_t, + data1: svint8_t, + data2: svint8_t, + data3: svint8_t, + pg: svbool_t, + ptr: *mut i8, + ); + } + _svst4_s8( + svget4_s8::<0>(data), + svget4_s8::<1>(data), + svget4_s8::<2>(data), + svget4_s8::<3>(data), + pg, + base, + ) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4h))] +pub unsafe fn svst4_s16(pg: svbool_t, base: *mut i16, data: svint16x4_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st4.nxv8i16")] + fn _svst4_s16( + data0: svint16_t, + data1: svint16_t, + data2: svint16_t, + data3: svint16_t, + pg: svbool8_t, + ptr: *mut i16, + ); + } + _svst4_s16( + svget4_s16::<0>(data), + svget4_s16::<1>(data), + svget4_s16::<2>(data), + svget4_s16::<3>(data), + pg.sve_into(), + base, + ) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4w))] +pub unsafe fn svst4_s32(pg: svbool_t, base: *mut i32, data: svint32x4_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st4.nxv4i32")] + fn _svst4_s32( + data0: svint32_t, + data1: svint32_t, + data2: svint32_t, + data3: svint32_t, + pg: svbool4_t, + ptr: *mut i32, + ); + } + _svst4_s32( + svget4_s32::<0>(data), + svget4_s32::<1>(data), + svget4_s32::<2>(data), + svget4_s32::<3>(data), + pg.sve_into(), + base, + ) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4d))] +pub unsafe fn svst4_s64(pg: svbool_t, base: *mut i64, data: svint64x4_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st4.nxv2i64")] + fn _svst4_s64( + data0: svint64_t, + data1: svint64_t, + data2: svint64_t, + data3: svint64_t, + pg: svbool2_t, + ptr: *mut i64, + ); + } + _svst4_s64( + svget4_s64::<0>(data), + svget4_s64::<1>(data), + svget4_s64::<2>(data), + svget4_s64::<3>(data), + pg.sve_into(), + base, + ) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4b))] +pub unsafe fn svst4_u8(pg: svbool_t, base: *mut u8, data: svuint8x4_t) { + svst4_s8(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4h))] +pub unsafe fn svst4_u16(pg: svbool_t, base: *mut u16, data: svuint16x4_t) { + svst4_s16(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4w))] +pub unsafe fn svst4_u32(pg: svbool_t, base: *mut u32, data: svuint32x4_t) { + svst4_s32(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4d))] +pub unsafe fn svst4_u64(pg: svbool_t, base: *mut u64, data: svuint64x4_t) { + svst4_s64(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4w))] +pub unsafe fn svst4_vnum_f32(pg: svbool_t, base: *mut f32, vnum: i64, data: svfloat32x4_t) { + svst4_f32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4d))] +pub unsafe fn svst4_vnum_f64(pg: svbool_t, base: *mut f64, vnum: i64, data: svfloat64x4_t) { + svst4_f64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4b))] +pub unsafe fn svst4_vnum_s8(pg: svbool_t, base: *mut i8, vnum: i64, data: svint8x4_t) { + svst4_s8(pg, base.offset(svcntb() as isize * vnum as isize), data) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4h))] +pub unsafe fn svst4_vnum_s16(pg: svbool_t, base: *mut i16, vnum: i64, data: svint16x4_t) { + svst4_s16(pg, base.offset(svcnth() as isize * vnum as isize), data) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4w))] +pub unsafe fn svst4_vnum_s32(pg: svbool_t, base: *mut i32, vnum: i64, data: svint32x4_t) { + svst4_s32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4d))] +pub unsafe fn svst4_vnum_s64(pg: svbool_t, base: *mut i64, vnum: i64, data: svint64x4_t) { + svst4_s64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4b))] +pub unsafe fn svst4_vnum_u8(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint8x4_t) { + svst4_u8(pg, base.offset(svcntb() as isize * vnum as isize), data) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4h))] +pub unsafe fn svst4_vnum_u16(pg: svbool_t, base: *mut u16, vnum: i64, data: svuint16x4_t) { + svst4_u16(pg, base.offset(svcnth() as isize * vnum as isize), data) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4w))] +pub unsafe fn svst4_vnum_u32(pg: svbool_t, base: *mut u32, vnum: i64, data: svuint32x4_t) { + svst4_u32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Store four vectors into four-element tuples"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(st4d))] +pub unsafe fn svst4_vnum_u64(pg: svbool_t, base: *mut u64, vnum: i64, data: svuint64x4_t) { + svst4_u64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_f32(pg: svbool_t, base: *mut f32, data: svfloat32_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.stnt1.nxv4f32")] + fn _svstnt1_f32(data: svfloat32_t, pg: svbool4_t, ptr: *mut f32); + } + _svstnt1_f32(data, pg.sve_into(), base) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_f64(pg: svbool_t, base: *mut f64, data: svfloat64_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.stnt1.nxv2f64")] + fn _svstnt1_f64(data: svfloat64_t, pg: svbool2_t, ptr: *mut f64); + } + _svstnt1_f64(data, pg.sve_into(), base) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1_s8(pg: svbool_t, base: *mut i8, data: svint8_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.stnt1.nxv16i8")] + fn _svstnt1_s8(data: svint8_t, pg: svbool_t, ptr: *mut i8); + } + _svstnt1_s8(data, pg, base) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1_s16(pg: svbool_t, base: *mut i16, data: svint16_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.stnt1.nxv8i16")] + fn _svstnt1_s16(data: svint16_t, pg: svbool8_t, ptr: *mut i16); + } + _svstnt1_s16(data, pg.sve_into(), base) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_s32(pg: svbool_t, base: *mut i32, data: svint32_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.stnt1.nxv4i32")] + fn _svstnt1_s32(data: svint32_t, pg: svbool4_t, ptr: *mut i32); + } + _svstnt1_s32(data, pg.sve_into(), base) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_s64(pg: svbool_t, base: *mut i64, data: svint64_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.stnt1.nxv2i64")] + fn _svstnt1_s64(data: svint64_t, pg: svbool2_t, ptr: *mut i64); + } + _svstnt1_s64(data, pg.sve_into(), base) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1_u8(pg: svbool_t, base: *mut u8, data: svuint8_t) { + svstnt1_s8(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1_u16(pg: svbool_t, base: *mut u16, data: svuint16_t) { + svstnt1_s16(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_u32(pg: svbool_t, base: *mut u32, data: svuint32_t) { + svstnt1_s32(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_u64(pg: svbool_t, base: *mut u64, data: svuint64_t) { + svstnt1_s64(pg, base.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_vnum_f32(pg: svbool_t, base: *mut f32, vnum: i64, data: svfloat32_t) { + svstnt1_f32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_vnum_f64(pg: svbool_t, base: *mut f64, vnum: i64, data: svfloat64_t) { + svstnt1_f64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1_vnum_s8(pg: svbool_t, base: *mut i8, vnum: i64, data: svint8_t) { + svstnt1_s8(pg, base.offset(svcntb() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1_vnum_s16(pg: svbool_t, base: *mut i16, vnum: i64, data: svint16_t) { + svstnt1_s16(pg, base.offset(svcnth() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_vnum_s32(pg: svbool_t, base: *mut i32, vnum: i64, data: svint32_t) { + svstnt1_s32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_vnum_s64(pg: svbool_t, base: *mut i64, vnum: i64, data: svint64_t) { + svstnt1_s64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1_vnum_u8(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint8_t) { + svstnt1_u8(pg, base.offset(svcntb() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1_vnum_u16(pg: svbool_t, base: *mut u16, vnum: i64, data: svuint16_t) { + svstnt1_u16(pg, base.offset(svcnth() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_vnum_u32(pg: svbool_t, base: *mut u32, vnum: i64, data: svuint32_t) { + svstnt1_u32(pg, base.offset(svcntw() as isize * vnum as isize), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_vnum_u64(pg: svbool_t, base: *mut u64, vnum: i64, data: svuint64_t) { + svstnt1_u64(pg, base.offset(svcntd() as isize * vnum as isize), data) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsub.nxv4f32")] + fn _svsub_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svsub_f32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svsub_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svsub_f32_m(pg, op1, op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svsub_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svsub_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svsub_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsub.nxv2f64")] + fn _svsub_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svsub_f64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svsub_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svsub_f64_m(pg, op1, op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svsub_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svsub_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsub))] +pub fn svsub_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svsub_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sub.nxv16i8")] + fn _svsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svsub_s8_m(pg, op1, op2) } +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svsub_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svsub_s8_m(pg, op1, op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svsub_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svsub_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svsub_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sub.nxv8i16")] + fn _svsub_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svsub_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svsub_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svsub_s16_m(pg, op1, op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svsub_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svsub_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svsub_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sub.nxv4i32")] + fn _svsub_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svsub_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svsub_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svsub_s32_m(pg, op1, op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svsub_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svsub_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svsub_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sub.nxv2i64")] + fn _svsub_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svsub_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svsub_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svsub_s64_m(pg, op1, op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svsub_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svsub_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svsub_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svsub_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svsub_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svsub_u8_m(pg, op1, op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svsub_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svsub_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svsub_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svsub_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svsub_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svsub_u16_m(pg, op1, op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svsub_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svsub_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svsub_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svsub_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svsub_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svsub_u32_m(pg, op1, op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svsub_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svsub_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svsub_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svsub_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svsub_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svsub_u64_m(pg, op1, op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svsub_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svsub_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sub))] +pub fn svsub_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svsub_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsubr.nxv4f32")] + fn _svsubr_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svsubr_f32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svsubr_f32_m(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svsubr_f32_m(pg, op1, op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svsubr_f32_x(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svsubr_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { + svsubr_f32_z(pg, op1, svdup_n_f32(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsubr.nxv2f64")] + fn _svsubr_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svsubr_f64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svsubr_f64_m(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svsubr_f64_m(pg, op1, op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svsubr_f64_x(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svsubr_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fsubr))] +pub fn svsubr_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { + svsubr_f64_z(pg, op1, svdup_n_f64(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subr.nxv16i8")] + fn _svsubr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svsubr_s8_m(pg, op1, op2) } +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svsubr_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svsubr_s8_m(pg, op1, op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svsubr_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svsubr_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svsubr_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subr.nxv8i16")] + fn _svsubr_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svsubr_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svsubr_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svsubr_s16_m(pg, op1, op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svsubr_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svsubr_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svsubr_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subr.nxv4i32")] + fn _svsubr_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svsubr_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svsubr_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svsubr_s32_m(pg, op1, op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svsubr_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svsubr_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svsubr_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subr.nxv2i64")] + fn _svsubr_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svsubr_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svsubr_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svsubr_s64_m(pg, op1, op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svsubr_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svsubr_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svsubr_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svsubr_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svsubr_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svsubr_u8_m(pg, op1, op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svsubr_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svsubr_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svsubr_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svsubr_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svsubr_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svsubr_u16_m(pg, op1, op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svsubr_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svsubr_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svsubr_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svsubr_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svsubr_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svsubr_u32_m(pg, op1, op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svsubr_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svsubr_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svsubr_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svsubr_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svsubr_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svsubr_u64_m(pg, op1, op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svsubr_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svsubr_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subr))] +pub fn svsubr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svsubr_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Dot product (signed × unsigned)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsudot_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,i8mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sudot, IMM_INDEX = 0))] +pub fn svsudot_lane_s32( + op1: svint32_t, + op2: svint8_t, + op3: svuint8_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sudot.lane.nxv4i32" + )] + fn _svsudot_lane_s32( + op1: svint32_t, + op2: svint8_t, + op3: svint8_t, + imm_index: i32, + ) -> svint32_t; + } + unsafe { _svsudot_lane_s32(op1, op2, op3.as_signed(), IMM_INDEX) } +} +#[doc = "Dot product (signed × unsigned)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsudot[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,i8mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usdot))] +pub fn svsudot_s32(op1: svint32_t, op2: svint8_t, op3: svuint8_t) -> svint32_t { + svusdot_s32(op1, op3, op2) +} +#[doc = "Dot product (signed × unsigned)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsudot[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,i8mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usdot))] +pub fn svsudot_n_s32(op1: svint32_t, op2: svint8_t, op3: u8) -> svint32_t { + svsudot_s32(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Table lookup in single-vector table"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl_f32(data: svfloat32_t, indices: svuint32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl.nxv4f32")] + fn _svtbl_f32(data: svfloat32_t, indices: svint32_t) -> svfloat32_t; + } + unsafe { _svtbl_f32(data, indices.as_signed()) } +} +#[doc = "Table lookup in single-vector table"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl_f64(data: svfloat64_t, indices: svuint64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl.nxv2f64")] + fn _svtbl_f64(data: svfloat64_t, indices: svint64_t) -> svfloat64_t; + } + unsafe { _svtbl_f64(data, indices.as_signed()) } +} +#[doc = "Table lookup in single-vector table"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl_s8(data: svint8_t, indices: svuint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl.nxv16i8")] + fn _svtbl_s8(data: svint8_t, indices: svint8_t) -> svint8_t; + } + unsafe { _svtbl_s8(data, indices.as_signed()) } +} +#[doc = "Table lookup in single-vector table"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl_s16(data: svint16_t, indices: svuint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl.nxv8i16")] + fn _svtbl_s16(data: svint16_t, indices: svint16_t) -> svint16_t; + } + unsafe { _svtbl_s16(data, indices.as_signed()) } +} +#[doc = "Table lookup in single-vector table"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl_s32(data: svint32_t, indices: svuint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl.nxv4i32")] + fn _svtbl_s32(data: svint32_t, indices: svint32_t) -> svint32_t; + } + unsafe { _svtbl_s32(data, indices.as_signed()) } +} +#[doc = "Table lookup in single-vector table"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl_s64(data: svint64_t, indices: svuint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl.nxv2i64")] + fn _svtbl_s64(data: svint64_t, indices: svint64_t) -> svint64_t; + } + unsafe { _svtbl_s64(data, indices.as_signed()) } +} +#[doc = "Table lookup in single-vector table"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl_u8(data: svuint8_t, indices: svuint8_t) -> svuint8_t { + unsafe { svtbl_s8(data.as_signed(), indices).as_unsigned() } +} +#[doc = "Table lookup in single-vector table"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl_u16(data: svuint16_t, indices: svuint16_t) -> svuint16_t { + unsafe { svtbl_s16(data.as_signed(), indices).as_unsigned() } +} +#[doc = "Table lookup in single-vector table"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl_u32(data: svuint32_t, indices: svuint32_t) -> svuint32_t { + unsafe { svtbl_s32(data.as_signed(), indices).as_unsigned() } +} +#[doc = "Table lookup in single-vector table"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl_u64(data: svuint64_t, indices: svuint64_t) -> svuint64_t { + unsafe { svtbl_s64(data.as_signed(), indices).as_unsigned() } +} +#[doc = "Trigonometric multiply-add coefficient"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtmad[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ftmad, IMM3 = 0))] +pub fn svtmad_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + static_assert_range!(IMM3, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ftmad.x.nxv4f32" + )] + fn _svtmad_f32(op1: svfloat32_t, op2: svfloat32_t, imm3: i32) -> svfloat32_t; + } + unsafe { _svtmad_f32(op1, op2, IMM3) } +} +#[doc = "Trigonometric multiply-add coefficient"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtmad[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ftmad, IMM3 = 0))] +pub fn svtmad_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + static_assert_range!(IMM3, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ftmad.x.nxv2f64" + )] + fn _svtmad_f64(op1: svfloat64_t, op2: svfloat64_t, imm3: i32) -> svfloat64_t; + } + unsafe { _svtmad_f64(op1, op2, IMM3) } +} +#[doc = "Interleave even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1_b8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_b8(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv16i1")] + fn _svtrn1_b8(op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svtrn1_b8(op1, op2) } +} +#[doc = "Interleave even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_b16(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv8i1")] + fn _svtrn1_b16(op1: svbool8_t, op2: svbool8_t) -> svbool8_t; + } + unsafe { _svtrn1_b16(op1.sve_into(), op2.sve_into()).sve_into() } +} +#[doc = "Interleave even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_b32(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv4i1")] + fn _svtrn1_b32(op1: svbool4_t, op2: svbool4_t) -> svbool4_t; + } + unsafe { _svtrn1_b32(op1.sve_into(), op2.sve_into()).sve_into() } +} +#[doc = "Interleave even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_b64(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv2i1")] + fn _svtrn1_b64(op1: svbool2_t, op2: svbool2_t) -> svbool2_t; + } + unsafe { _svtrn1_b64(op1.sve_into(), op2.sve_into()).sve_into() } +} +#[doc = "Interleave even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv4f32")] + fn _svtrn1_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svtrn1_f32(op1, op2) } +} +#[doc = "Interleave even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv2f64")] + fn _svtrn1_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svtrn1_f64(op1, op2) } +} +#[doc = "Interleave even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv16i8")] + fn _svtrn1_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svtrn1_s8(op1, op2) } +} +#[doc = "Interleave even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv8i16")] + fn _svtrn1_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svtrn1_s16(op1, op2) } +} +#[doc = "Interleave even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv4i32")] + fn _svtrn1_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svtrn1_s32(op1, op2) } +} +#[doc = "Interleave even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv2i64")] + fn _svtrn1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svtrn1_s64(op1, op2) } +} +#[doc = "Interleave even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svtrn1_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svtrn1_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svtrn1_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svtrn1_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1q.nxv4f32")] + fn _svtrn1q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svtrn1q_f32(op1, op2) } +} +#[doc = "Interleave even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1q.nxv2f64")] + fn _svtrn1q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svtrn1q_f64(op1, op2) } +} +#[doc = "Interleave even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1q.nxv16i8")] + fn _svtrn1q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svtrn1q_s8(op1, op2) } +} +#[doc = "Interleave even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1q.nxv8i16")] + fn _svtrn1q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svtrn1q_s16(op1, op2) } +} +#[doc = "Interleave even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1q.nxv4i32")] + fn _svtrn1q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svtrn1q_s32(op1, op2) } +} +#[doc = "Interleave even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1q.nxv2i64")] + fn _svtrn1q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svtrn1q_s64(op1, op2) } +} +#[doc = "Interleave even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1q_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svtrn1q_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1q_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svtrn1q_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1q_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svtrn1q_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn1))] +pub fn svtrn1q_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svtrn1q_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2_b8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_b8(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv16i1")] + fn _svtrn2_b8(op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svtrn2_b8(op1, op2) } +} +#[doc = "Interleave odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_b16(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv8i1")] + fn _svtrn2_b16(op1: svbool8_t, op2: svbool8_t) -> svbool8_t; + } + unsafe { _svtrn2_b16(op1.sve_into(), op2.sve_into()).sve_into() } +} +#[doc = "Interleave odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_b32(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv4i1")] + fn _svtrn2_b32(op1: svbool4_t, op2: svbool4_t) -> svbool4_t; + } + unsafe { _svtrn2_b32(op1.sve_into(), op2.sve_into()).sve_into() } +} +#[doc = "Interleave odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_b64(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv2i1")] + fn _svtrn2_b64(op1: svbool2_t, op2: svbool2_t) -> svbool2_t; + } + unsafe { _svtrn2_b64(op1.sve_into(), op2.sve_into()).sve_into() } +} +#[doc = "Interleave odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv4f32")] + fn _svtrn2_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svtrn2_f32(op1, op2) } +} +#[doc = "Interleave odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv2f64")] + fn _svtrn2_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svtrn2_f64(op1, op2) } +} +#[doc = "Interleave odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv16i8")] + fn _svtrn2_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svtrn2_s8(op1, op2) } +} +#[doc = "Interleave odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv8i16")] + fn _svtrn2_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svtrn2_s16(op1, op2) } +} +#[doc = "Interleave odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv4i32")] + fn _svtrn2_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svtrn2_s32(op1, op2) } +} +#[doc = "Interleave odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv2i64")] + fn _svtrn2_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svtrn2_s64(op1, op2) } +} +#[doc = "Interleave odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svtrn2_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svtrn2_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svtrn2_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svtrn2_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave odd quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2q.nxv4f32")] + fn _svtrn2q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svtrn2q_f32(op1, op2) } +} +#[doc = "Interleave odd quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2q.nxv2f64")] + fn _svtrn2q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svtrn2q_f64(op1, op2) } +} +#[doc = "Interleave odd quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2q.nxv16i8")] + fn _svtrn2q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svtrn2q_s8(op1, op2) } +} +#[doc = "Interleave odd quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2q.nxv8i16")] + fn _svtrn2q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svtrn2q_s16(op1, op2) } +} +#[doc = "Interleave odd quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2q.nxv4i32")] + fn _svtrn2q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svtrn2q_s32(op1, op2) } +} +#[doc = "Interleave odd quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2q.nxv2i64")] + fn _svtrn2q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svtrn2q_s64(op1, op2) } +} +#[doc = "Interleave odd quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2q_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svtrn2q_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave odd quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2q_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svtrn2q_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave odd quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2q_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svtrn2q_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave odd quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(trn2))] +pub fn svtrn2q_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svtrn2q_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Trigonometric starting value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtsmul[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ftsmul))] +pub fn svtsmul_f32(op1: svfloat32_t, op2: svuint32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ftsmul.x.nxv4f32" + )] + fn _svtsmul_f32(op1: svfloat32_t, op2: svint32_t) -> svfloat32_t; + } + unsafe { _svtsmul_f32(op1, op2.as_signed()) } +} +#[doc = "Trigonometric starting value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtsmul[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ftsmul))] +pub fn svtsmul_f64(op1: svfloat64_t, op2: svuint64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ftsmul.x.nxv2f64" + )] + fn _svtsmul_f64(op1: svfloat64_t, op2: svint64_t) -> svfloat64_t; + } + unsafe { _svtsmul_f64(op1, op2.as_signed()) } +} +#[doc = "Trigonometric select coefficient"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtssel[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ftssel))] +pub fn svtssel_f32(op1: svfloat32_t, op2: svuint32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ftssel.x.nxv4f32" + )] + fn _svtssel_f32(op1: svfloat32_t, op2: svint32_t) -> svfloat32_t; + } + unsafe { _svtssel_f32(op1, op2.as_signed()) } +} +#[doc = "Trigonometric select coefficient"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtssel[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ftssel))] +pub fn svtssel_f64(op1: svfloat64_t, op2: svuint64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ftssel.x.nxv2f64" + )] + fn _svtssel_f64(op1: svfloat64_t, op2: svint64_t) -> svfloat64_t; + } + unsafe { _svtssel_f64(op1, op2.as_signed()) } +} +#[doc = "Create an uninitialized tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_f32)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef2_f32() -> svfloat32x2_t { + svcreate2_f32(svdup_n_f32(0f32), svdup_n_f32(0f32)) +} +#[doc = "Create an uninitialized tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_f64)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef2_f64() -> svfloat64x2_t { + svcreate2_f64(svdup_n_f64(0f64), svdup_n_f64(0f64)) +} +#[doc = "Create an uninitialized tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_s8)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef2_s8() -> svint8x2_t { + svcreate2_s8(svdup_n_s8(0), svdup_n_s8(0)) +} +#[doc = "Create an uninitialized tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_s16)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef2_s16() -> svint16x2_t { + svcreate2_s16(svdup_n_s16(0), svdup_n_s16(0)) +} +#[doc = "Create an uninitialized tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_s32)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef2_s32() -> svint32x2_t { + svcreate2_s32(svdup_n_s32(0), svdup_n_s32(0)) +} +#[doc = "Create an uninitialized tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_s64)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef2_s64() -> svint64x2_t { + svcreate2_s64(svdup_n_s64(0), svdup_n_s64(0)) +} +#[doc = "Create an uninitialized tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_u8)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef2_u8() -> svuint8x2_t { + svcreate2_u8(svdup_n_u8(0), svdup_n_u8(0)) +} +#[doc = "Create an uninitialized tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_u16)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef2_u16() -> svuint16x2_t { + svcreate2_u16(svdup_n_u16(0), svdup_n_u16(0)) +} +#[doc = "Create an uninitialized tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_u32)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef2_u32() -> svuint32x2_t { + svcreate2_u32(svdup_n_u32(0), svdup_n_u32(0)) +} +#[doc = "Create an uninitialized tuple of two vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_u64)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef2_u64() -> svuint64x2_t { + svcreate2_u64(svdup_n_u64(0), svdup_n_u64(0)) +} +#[doc = "Create an uninitialized tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_f32)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef3_f32() -> svfloat32x3_t { + svcreate3_f32(svdup_n_f32(0f32), svdup_n_f32(0f32), svdup_n_f32(0f32)) +} +#[doc = "Create an uninitialized tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_f64)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef3_f64() -> svfloat64x3_t { + svcreate3_f64(svdup_n_f64(0f64), svdup_n_f64(0f64), svdup_n_f64(0f64)) +} +#[doc = "Create an uninitialized tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_s8)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef3_s8() -> svint8x3_t { + svcreate3_s8(svdup_n_s8(0), svdup_n_s8(0), svdup_n_s8(0)) +} +#[doc = "Create an uninitialized tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_s16)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef3_s16() -> svint16x3_t { + svcreate3_s16(svdup_n_s16(0), svdup_n_s16(0), svdup_n_s16(0)) +} +#[doc = "Create an uninitialized tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_s32)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef3_s32() -> svint32x3_t { + svcreate3_s32(svdup_n_s32(0), svdup_n_s32(0), svdup_n_s32(0)) +} +#[doc = "Create an uninitialized tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_s64)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef3_s64() -> svint64x3_t { + svcreate3_s64(svdup_n_s64(0), svdup_n_s64(0), svdup_n_s64(0)) +} +#[doc = "Create an uninitialized tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_u8)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef3_u8() -> svuint8x3_t { + svcreate3_u8(svdup_n_u8(0), svdup_n_u8(0), svdup_n_u8(0)) +} +#[doc = "Create an uninitialized tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_u16)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef3_u16() -> svuint16x3_t { + svcreate3_u16(svdup_n_u16(0), svdup_n_u16(0), svdup_n_u16(0)) +} +#[doc = "Create an uninitialized tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_u32)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef3_u32() -> svuint32x3_t { + svcreate3_u32(svdup_n_u32(0), svdup_n_u32(0), svdup_n_u32(0)) +} +#[doc = "Create an uninitialized tuple of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_u64)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef3_u64() -> svuint64x3_t { + svcreate3_u64(svdup_n_u64(0), svdup_n_u64(0), svdup_n_u64(0)) +} +#[doc = "Create an uninitialized tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_f32)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef4_f32() -> svfloat32x4_t { + svcreate4_f32( + svdup_n_f32(0f32), + svdup_n_f32(0f32), + svdup_n_f32(0f32), + svdup_n_f32(0f32), + ) +} +#[doc = "Create an uninitialized tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_f64)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef4_f64() -> svfloat64x4_t { + svcreate4_f64( + svdup_n_f64(0f64), + svdup_n_f64(0f64), + svdup_n_f64(0f64), + svdup_n_f64(0f64), + ) +} +#[doc = "Create an uninitialized tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_s8)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef4_s8() -> svint8x4_t { + svcreate4_s8(svdup_n_s8(0), svdup_n_s8(0), svdup_n_s8(0), svdup_n_s8(0)) +} +#[doc = "Create an uninitialized tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_s16)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef4_s16() -> svint16x4_t { + svcreate4_s16( + svdup_n_s16(0), + svdup_n_s16(0), + svdup_n_s16(0), + svdup_n_s16(0), + ) +} +#[doc = "Create an uninitialized tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_s32)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef4_s32() -> svint32x4_t { + svcreate4_s32( + svdup_n_s32(0), + svdup_n_s32(0), + svdup_n_s32(0), + svdup_n_s32(0), + ) +} +#[doc = "Create an uninitialized tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_s64)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef4_s64() -> svint64x4_t { + svcreate4_s64( + svdup_n_s64(0), + svdup_n_s64(0), + svdup_n_s64(0), + svdup_n_s64(0), + ) +} +#[doc = "Create an uninitialized tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_u8)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef4_u8() -> svuint8x4_t { + svcreate4_u8(svdup_n_u8(0), svdup_n_u8(0), svdup_n_u8(0), svdup_n_u8(0)) +} +#[doc = "Create an uninitialized tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_u16)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef4_u16() -> svuint16x4_t { + svcreate4_u16( + svdup_n_u16(0), + svdup_n_u16(0), + svdup_n_u16(0), + svdup_n_u16(0), + ) +} +#[doc = "Create an uninitialized tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_u32)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef4_u32() -> svuint32x4_t { + svcreate4_u32( + svdup_n_u32(0), + svdup_n_u32(0), + svdup_n_u32(0), + svdup_n_u32(0), + ) +} +#[doc = "Create an uninitialized tuple of four vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_u64)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef4_u64() -> svuint64x4_t { + svcreate4_u64( + svdup_n_u64(0), + svdup_n_u64(0), + svdup_n_u64(0), + svdup_n_u64(0), + ) +} +#[doc = "Create an uninitialized vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_f32)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef_f32() -> svfloat32_t { + svdup_n_f32(0f32) +} +#[doc = "Create an uninitialized vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_f64)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef_f64() -> svfloat64_t { + svdup_n_f64(0f64) +} +#[doc = "Create an uninitialized vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_s8)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef_s8() -> svint8_t { + svdup_n_s8(0) +} +#[doc = "Create an uninitialized vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_s16)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef_s16() -> svint16_t { + svdup_n_s16(0) +} +#[doc = "Create an uninitialized vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_s32)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef_s32() -> svint32_t { + svdup_n_s32(0) +} +#[doc = "Create an uninitialized vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_s64)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef_s64() -> svint64_t { + svdup_n_s64(0) +} +#[doc = "Create an uninitialized vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_u8)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef_u8() -> svuint8_t { + svdup_n_u8(0) +} +#[doc = "Create an uninitialized vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_u16)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef_u16() -> svuint16_t { + svdup_n_u16(0) +} +#[doc = "Create an uninitialized vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_u32)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef_u32() -> svuint32_t { + svdup_n_u32(0) +} +#[doc = "Create an uninitialized vector"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_u64)"] +#[doc = "## Safety"] +#[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +pub unsafe fn svundef_u64() -> svuint64_t { + svdup_n_u64(0) +} +#[doc = "Dot product (unsigned × signed)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svusdot_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,i8mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usdot, IMM_INDEX = 0))] +pub fn svusdot_lane_s32( + op1: svint32_t, + op2: svuint8_t, + op3: svint8_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.usdot.lane.nxv4i32" + )] + fn _svusdot_lane_s32( + op1: svint32_t, + op2: svint8_t, + op3: svint8_t, + imm_index: i32, + ) -> svint32_t; + } + unsafe { _svusdot_lane_s32(op1, op2.as_signed(), op3, IMM_INDEX) } +} +#[doc = "Dot product (unsigned × signed)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svusdot[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,i8mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usdot))] +pub fn svusdot_s32(op1: svint32_t, op2: svuint8_t, op3: svint8_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usdot.nxv4i32")] + fn _svusdot_s32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t; + } + unsafe { _svusdot_s32(op1, op2.as_signed(), op3) } +} +#[doc = "Dot product (unsigned × signed)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svusdot[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,i8mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usdot))] +pub fn svusdot_n_s32(op1: svint32_t, op2: svuint8_t, op3: i8) -> svint32_t { + svusdot_s32(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Matrix multiply-accumulate (unsigned × signed)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svusmmla[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,i8mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usmmla))] +pub fn svusmmla_s32(op1: svint32_t, op2: svuint8_t, op3: svint8_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usmmla.nxv4i32")] + fn _svusmmla_s32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t; + } + unsafe { _svusmmla_s32(op1, op2.as_signed(), op3) } +} +#[doc = "Concatenate even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1_b8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_b8(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv16i1")] + fn _svuzp1_b8(op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svuzp1_b8(op1, op2) } +} +#[doc = "Concatenate even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_b16(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv8i1")] + fn _svuzp1_b16(op1: svbool8_t, op2: svbool8_t) -> svbool8_t; + } + unsafe { _svuzp1_b16(op1.sve_into(), op2.sve_into()).sve_into() } +} +#[doc = "Concatenate even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_b32(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv4i1")] + fn _svuzp1_b32(op1: svbool4_t, op2: svbool4_t) -> svbool4_t; + } + unsafe { _svuzp1_b32(op1.sve_into(), op2.sve_into()).sve_into() } +} +#[doc = "Concatenate even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_b64(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv2i1")] + fn _svuzp1_b64(op1: svbool2_t, op2: svbool2_t) -> svbool2_t; + } + unsafe { _svuzp1_b64(op1.sve_into(), op2.sve_into()).sve_into() } +} +#[doc = "Concatenate even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv4f32")] + fn _svuzp1_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svuzp1_f32(op1, op2) } +} +#[doc = "Concatenate even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv2f64")] + fn _svuzp1_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svuzp1_f64(op1, op2) } +} +#[doc = "Concatenate even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv16i8")] + fn _svuzp1_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svuzp1_s8(op1, op2) } +} +#[doc = "Concatenate even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv8i16")] + fn _svuzp1_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svuzp1_s16(op1, op2) } +} +#[doc = "Concatenate even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv4i32")] + fn _svuzp1_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svuzp1_s32(op1, op2) } +} +#[doc = "Concatenate even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv2i64")] + fn _svuzp1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svuzp1_s64(op1, op2) } +} +#[doc = "Concatenate even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svuzp1_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svuzp1_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svuzp1_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate even elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svuzp1_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1q.nxv4f32")] + fn _svuzp1q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svuzp1q_f32(op1, op2) } +} +#[doc = "Concatenate even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1q.nxv2f64")] + fn _svuzp1q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svuzp1q_f64(op1, op2) } +} +#[doc = "Concatenate even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1q.nxv16i8")] + fn _svuzp1q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svuzp1q_s8(op1, op2) } +} +#[doc = "Concatenate even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1q.nxv8i16")] + fn _svuzp1q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svuzp1q_s16(op1, op2) } +} +#[doc = "Concatenate even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1q.nxv4i32")] + fn _svuzp1q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svuzp1q_s32(op1, op2) } +} +#[doc = "Concatenate even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1q.nxv2i64")] + fn _svuzp1q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svuzp1q_s64(op1, op2) } +} +#[doc = "Concatenate even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1q_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svuzp1q_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1q_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svuzp1q_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1q_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svuzp1q_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate even quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp1))] +pub fn svuzp1q_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svuzp1q_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2_b8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_b8(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv16i1")] + fn _svuzp2_b8(op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svuzp2_b8(op1, op2) } +} +#[doc = "Concatenate odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_b16(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv8i1")] + fn _svuzp2_b16(op1: svbool8_t, op2: svbool8_t) -> svbool8_t; + } + unsafe { _svuzp2_b16(op1.sve_into(), op2.sve_into()).sve_into() } +} +#[doc = "Concatenate odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_b32(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv4i1")] + fn _svuzp2_b32(op1: svbool4_t, op2: svbool4_t) -> svbool4_t; + } + unsafe { _svuzp2_b32(op1.sve_into(), op2.sve_into()).sve_into() } +} +#[doc = "Concatenate odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_b64(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv2i1")] + fn _svuzp2_b64(op1: svbool2_t, op2: svbool2_t) -> svbool2_t; + } + unsafe { _svuzp2_b64(op1.sve_into(), op2.sve_into()).sve_into() } +} +#[doc = "Concatenate odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv4f32")] + fn _svuzp2_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svuzp2_f32(op1, op2) } +} +#[doc = "Concatenate odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv2f64")] + fn _svuzp2_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svuzp2_f64(op1, op2) } +} +#[doc = "Concatenate odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv16i8")] + fn _svuzp2_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svuzp2_s8(op1, op2) } +} +#[doc = "Concatenate odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv8i16")] + fn _svuzp2_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svuzp2_s16(op1, op2) } +} +#[doc = "Concatenate odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv4i32")] + fn _svuzp2_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svuzp2_s32(op1, op2) } +} +#[doc = "Concatenate odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv2i64")] + fn _svuzp2_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svuzp2_s64(op1, op2) } +} +#[doc = "Concatenate odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svuzp2_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svuzp2_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svuzp2_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate odd elements from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svuzp2_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate odd quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2q.nxv4f32")] + fn _svuzp2q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svuzp2q_f32(op1, op2) } +} +#[doc = "Concatenate odd quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2q.nxv2f64")] + fn _svuzp2q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svuzp2q_f64(op1, op2) } +} +#[doc = "Concatenate odd quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2q.nxv16i8")] + fn _svuzp2q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svuzp2q_s8(op1, op2) } +} +#[doc = "Concatenate odd quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2q.nxv8i16")] + fn _svuzp2q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svuzp2q_s16(op1, op2) } +} +#[doc = "Concatenate odd quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2q.nxv4i32")] + fn _svuzp2q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svuzp2q_s32(op1, op2) } +} +#[doc = "Concatenate odd quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2q.nxv2i64")] + fn _svuzp2q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svuzp2q_s64(op1, op2) } +} +#[doc = "Concatenate odd quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2q_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svuzp2q_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate odd quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2q_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svuzp2q_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate odd quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2q_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svuzp2q_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Concatenate odd quadwords from two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uzp2))] +pub fn svuzp2q_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svuzp2q_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b8[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilele))] +pub fn svwhilele_b8_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilele.nxv16i1.i32" + )] + fn _svwhilele_b8_s32(op1: i32, op2: i32) -> svbool_t; + } + unsafe { _svwhilele_b8_s32(op1, op2) } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b16[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilele))] +pub fn svwhilele_b16_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilele.nxv8i1.i32" + )] + fn _svwhilele_b16_s32(op1: i32, op2: i32) -> svbool8_t; + } + unsafe { _svwhilele_b16_s32(op1, op2).sve_into() } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b32[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilele))] +pub fn svwhilele_b32_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilele.nxv4i1.i32" + )] + fn _svwhilele_b32_s32(op1: i32, op2: i32) -> svbool4_t; + } + unsafe { _svwhilele_b32_s32(op1, op2).sve_into() } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b64[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilele))] +pub fn svwhilele_b64_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilele.nxv2i1.i32" + )] + fn _svwhilele_b64_s32(op1: i32, op2: i32) -> svbool2_t; + } + unsafe { _svwhilele_b64_s32(op1, op2).sve_into() } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b8[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilele))] +pub fn svwhilele_b8_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilele.nxv16i1.i64" + )] + fn _svwhilele_b8_s64(op1: i64, op2: i64) -> svbool_t; + } + unsafe { _svwhilele_b8_s64(op1, op2) } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b16[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilele))] +pub fn svwhilele_b16_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilele.nxv8i1.i64" + )] + fn _svwhilele_b16_s64(op1: i64, op2: i64) -> svbool8_t; + } + unsafe { _svwhilele_b16_s64(op1, op2).sve_into() } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b32[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilele))] +pub fn svwhilele_b32_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilele.nxv4i1.i64" + )] + fn _svwhilele_b32_s64(op1: i64, op2: i64) -> svbool4_t; + } + unsafe { _svwhilele_b32_s64(op1, op2).sve_into() } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b64[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilele))] +pub fn svwhilele_b64_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilele.nxv2i1.i64" + )] + fn _svwhilele_b64_s64(op1: i64, op2: i64) -> svbool2_t; + } + unsafe { _svwhilele_b64_s64(op1, op2).sve_into() } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b8[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilels))] +pub fn svwhilele_b8_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilels.nxv16i1.i32" + )] + fn _svwhilele_b8_u32(op1: i32, op2: i32) -> svbool_t; + } + unsafe { _svwhilele_b8_u32(op1.as_signed(), op2.as_signed()) } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b16[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilels))] +pub fn svwhilele_b16_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilels.nxv8i1.i32" + )] + fn _svwhilele_b16_u32(op1: i32, op2: i32) -> svbool8_t; + } + unsafe { _svwhilele_b16_u32(op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b32[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilels))] +pub fn svwhilele_b32_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilels.nxv4i1.i32" + )] + fn _svwhilele_b32_u32(op1: i32, op2: i32) -> svbool4_t; + } + unsafe { _svwhilele_b32_u32(op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b64[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilels))] +pub fn svwhilele_b64_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilels.nxv2i1.i32" + )] + fn _svwhilele_b64_u32(op1: i32, op2: i32) -> svbool2_t; + } + unsafe { _svwhilele_b64_u32(op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b8[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilels))] +pub fn svwhilele_b8_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilels.nxv16i1.i64" + )] + fn _svwhilele_b8_u64(op1: i64, op2: i64) -> svbool_t; + } + unsafe { _svwhilele_b8_u64(op1.as_signed(), op2.as_signed()) } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b16[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilels))] +pub fn svwhilele_b16_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilels.nxv8i1.i64" + )] + fn _svwhilele_b16_u64(op1: i64, op2: i64) -> svbool8_t; + } + unsafe { _svwhilele_b16_u64(op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b32[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilels))] +pub fn svwhilele_b32_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilels.nxv4i1.i64" + )] + fn _svwhilele_b32_u64(op1: i64, op2: i64) -> svbool4_t; + } + unsafe { _svwhilele_b32_u64(op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "While incrementing scalar is less than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b64[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilels))] +pub fn svwhilele_b64_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilels.nxv2i1.i64" + )] + fn _svwhilele_b64_u64(op1: i64, op2: i64) -> svbool2_t; + } + unsafe { _svwhilele_b64_u64(op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "While incrementing scalar is less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b8[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilelt))] +pub fn svwhilelt_b8_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelt.nxv16i1.i32" + )] + fn _svwhilelt_b8_s32(op1: i32, op2: i32) -> svbool_t; + } + unsafe { _svwhilelt_b8_s32(op1, op2) } +} +#[doc = "While incrementing scalar is less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b16[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilelt))] +pub fn svwhilelt_b16_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelt.nxv8i1.i32" + )] + fn _svwhilelt_b16_s32(op1: i32, op2: i32) -> svbool8_t; + } + unsafe { _svwhilelt_b16_s32(op1, op2).sve_into() } +} +#[doc = "While incrementing scalar is less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b32[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilelt))] +pub fn svwhilelt_b32_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelt.nxv4i1.i32" + )] + fn _svwhilelt_b32_s32(op1: i32, op2: i32) -> svbool4_t; + } + unsafe { _svwhilelt_b32_s32(op1, op2).sve_into() } +} +#[doc = "While incrementing scalar is less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b64[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilelt))] +pub fn svwhilelt_b64_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelt.nxv2i1.i32" + )] + fn _svwhilelt_b64_s32(op1: i32, op2: i32) -> svbool2_t; + } + unsafe { _svwhilelt_b64_s32(op1, op2).sve_into() } +} +#[doc = "While incrementing scalar is less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b8[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilelt))] +pub fn svwhilelt_b8_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelt.nxv16i1.i64" + )] + fn _svwhilelt_b8_s64(op1: i64, op2: i64) -> svbool_t; + } + unsafe { _svwhilelt_b8_s64(op1, op2) } +} +#[doc = "While incrementing scalar is less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b16[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilelt))] +pub fn svwhilelt_b16_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelt.nxv8i1.i64" + )] + fn _svwhilelt_b16_s64(op1: i64, op2: i64) -> svbool8_t; + } + unsafe { _svwhilelt_b16_s64(op1, op2).sve_into() } +} +#[doc = "While incrementing scalar is less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b32[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilelt))] +pub fn svwhilelt_b32_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelt.nxv4i1.i64" + )] + fn _svwhilelt_b32_s64(op1: i64, op2: i64) -> svbool4_t; + } + unsafe { _svwhilelt_b32_s64(op1, op2).sve_into() } +} +#[doc = "While incrementing scalar is less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b64[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilelt))] +pub fn svwhilelt_b64_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelt.nxv2i1.i64" + )] + fn _svwhilelt_b64_s64(op1: i64, op2: i64) -> svbool2_t; + } + unsafe { _svwhilelt_b64_s64(op1, op2).sve_into() } +} +#[doc = "While incrementing scalar is less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b8[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svwhilelt_b8_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelo.nxv16i1.i32" + )] + fn _svwhilelt_b8_u32(op1: i32, op2: i32) -> svbool_t; + } + unsafe { _svwhilelt_b8_u32(op1.as_signed(), op2.as_signed()) } +} +#[doc = "While incrementing scalar is less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b16[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svwhilelt_b16_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelo.nxv8i1.i32" + )] + fn _svwhilelt_b16_u32(op1: i32, op2: i32) -> svbool8_t; + } + unsafe { _svwhilelt_b16_u32(op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "While incrementing scalar is less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b32[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svwhilelt_b32_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelo.nxv4i1.i32" + )] + fn _svwhilelt_b32_u32(op1: i32, op2: i32) -> svbool4_t; + } + unsafe { _svwhilelt_b32_u32(op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "While incrementing scalar is less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b64[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svwhilelt_b64_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelo.nxv2i1.i32" + )] + fn _svwhilelt_b64_u32(op1: i32, op2: i32) -> svbool2_t; + } + unsafe { _svwhilelt_b64_u32(op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "While incrementing scalar is less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b8[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svwhilelt_b8_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelo.nxv16i1.i64" + )] + fn _svwhilelt_b8_u64(op1: i64, op2: i64) -> svbool_t; + } + unsafe { _svwhilelt_b8_u64(op1.as_signed(), op2.as_signed()) } +} +#[doc = "While incrementing scalar is less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b16[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svwhilelt_b16_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelo.nxv8i1.i64" + )] + fn _svwhilelt_b16_u64(op1: i64, op2: i64) -> svbool8_t; + } + unsafe { _svwhilelt_b16_u64(op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "While incrementing scalar is less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b32[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svwhilelt_b32_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelo.nxv4i1.i64" + )] + fn _svwhilelt_b32_u64(op1: i64, op2: i64) -> svbool4_t; + } + unsafe { _svwhilelt_b32_u64(op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "While incrementing scalar is less than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b64[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilelo))] +pub fn svwhilelt_b64_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilelo.nxv2i1.i64" + )] + fn _svwhilelt_b64_u64(op1: i64, op2: i64) -> svbool2_t; + } + unsafe { _svwhilelt_b64_u64(op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "Write to the first-fault register"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwrffr)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(wrffr))] +pub fn svwrffr(op: svbool_t) { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.wrffr")] + fn _svwrffr(op: svbool_t); + } + unsafe { _svwrffr(op) } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1_b8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_b8(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv16i1")] + fn _svzip1_b8(op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svzip1_b8(op1, op2) } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_b16(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv8i1")] + fn _svzip1_b16(op1: svbool8_t, op2: svbool8_t) -> svbool8_t; + } + unsafe { _svzip1_b16(op1.sve_into(), op2.sve_into()).sve_into() } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_b32(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv4i1")] + fn _svzip1_b32(op1: svbool4_t, op2: svbool4_t) -> svbool4_t; + } + unsafe { _svzip1_b32(op1.sve_into(), op2.sve_into()).sve_into() } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_b64(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv2i1")] + fn _svzip1_b64(op1: svbool2_t, op2: svbool2_t) -> svbool2_t; + } + unsafe { _svzip1_b64(op1.sve_into(), op2.sve_into()).sve_into() } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv4f32")] + fn _svzip1_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svzip1_f32(op1, op2) } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv2f64")] + fn _svzip1_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svzip1_f64(op1, op2) } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv16i8")] + fn _svzip1_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svzip1_s8(op1, op2) } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv8i16")] + fn _svzip1_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svzip1_s16(op1, op2) } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv4i32")] + fn _svzip1_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svzip1_s32(op1, op2) } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv2i64")] + fn _svzip1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svzip1_s64(op1, op2) } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svzip1_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svzip1_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svzip1_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave elements from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svzip1_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave quadwords from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1q.nxv4f32")] + fn _svzip1q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svzip1q_f32(op1, op2) } +} +#[doc = "Interleave quadwords from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1q.nxv2f64")] + fn _svzip1q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svzip1q_f64(op1, op2) } +} +#[doc = "Interleave quadwords from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1q.nxv16i8")] + fn _svzip1q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svzip1q_s8(op1, op2) } +} +#[doc = "Interleave quadwords from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1q.nxv8i16")] + fn _svzip1q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svzip1q_s16(op1, op2) } +} +#[doc = "Interleave quadwords from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1q.nxv4i32")] + fn _svzip1q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svzip1q_s32(op1, op2) } +} +#[doc = "Interleave quadwords from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1q.nxv2i64")] + fn _svzip1q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svzip1q_s64(op1, op2) } +} +#[doc = "Interleave quadwords from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1q_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svzip1q_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave quadwords from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1q_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svzip1q_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave quadwords from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1q_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svzip1q_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave quadwords from low halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip1))] +pub fn svzip1q_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svzip1q_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2_b8)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_b8(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv16i1")] + fn _svzip2_b8(op1: svbool_t, op2: svbool_t) -> svbool_t; + } + unsafe { _svzip2_b8(op1, op2) } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2_b16)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_b16(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv8i1")] + fn _svzip2_b16(op1: svbool8_t, op2: svbool8_t) -> svbool8_t; + } + unsafe { _svzip2_b16(op1.sve_into(), op2.sve_into()).sve_into() } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2_b32)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_b32(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv4i1")] + fn _svzip2_b32(op1: svbool4_t, op2: svbool4_t) -> svbool4_t; + } + unsafe { _svzip2_b32(op1.sve_into(), op2.sve_into()).sve_into() } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2_b64)"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_b64(op1: svbool_t, op2: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv2i1")] + fn _svzip2_b64(op1: svbool2_t, op2: svbool2_t) -> svbool2_t; + } + unsafe { _svzip2_b64(op1.sve_into(), op2.sve_into()).sve_into() } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv4f32")] + fn _svzip2_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svzip2_f32(op1, op2) } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv2f64")] + fn _svzip2_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svzip2_f64(op1, op2) } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv16i8")] + fn _svzip2_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svzip2_s8(op1, op2) } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv8i16")] + fn _svzip2_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svzip2_s16(op1, op2) } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv4i32")] + fn _svzip2_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svzip2_s32(op1, op2) } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv2i64")] + fn _svzip2_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svzip2_s64(op1, op2) } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svzip2_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svzip2_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svzip2_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave elements from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svzip2_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave quadwords from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2q.nxv4f32")] + fn _svzip2q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svzip2q_f32(op1, op2) } +} +#[doc = "Interleave quadwords from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2q.nxv2f64")] + fn _svzip2q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svzip2q_f64(op1, op2) } +} +#[doc = "Interleave quadwords from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2q.nxv16i8")] + fn _svzip2q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svzip2q_s8(op1, op2) } +} +#[doc = "Interleave quadwords from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2q.nxv8i16")] + fn _svzip2q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svzip2q_s16(op1, op2) } +} +#[doc = "Interleave quadwords from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2q.nxv4i32")] + fn _svzip2q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svzip2q_s32(op1, op2) } +} +#[doc = "Interleave quadwords from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2q.nxv2i64")] + fn _svzip2q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svzip2q_s64(op1, op2) } +} +#[doc = "Interleave quadwords from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2q_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svzip2q_s8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave quadwords from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2q_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svzip2q_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave quadwords from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2q_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svzip2q_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleave quadwords from high halves of two inputs"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,f64mm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(zip2))] +pub fn svzip2q_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svzip2q_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve/ld_st_tests_aarch64.rs b/library/stdarch/crates/core_arch/src/aarch64/sve/ld_st_tests_aarch64.rs new file mode 100644 index 000000000000..973b7e9fa35a --- /dev/null +++ b/library/stdarch/crates/core_arch/src/aarch64/sve/ld_st_tests_aarch64.rs @@ -0,0 +1,9345 @@ +// This code is automatically generated. DO NOT MODIFY. +// +// Instead, modify `crates/stdarch-gen-arm/spec/sve` and run the following command to re-generate +// this file: +// +// ``` +// cargo run --bin=stdarch-gen-arm -- crates/stdarch-gen-arm/spec +// ``` +#![allow(unused)] +use super::*; +use std::boxed::Box; +use std::convert::{TryFrom, TryInto}; +use std::sync::LazyLock; +use std::vec::Vec; +use stdarch_test::simd_test; +static F32_DATA: LazyLock<[f32; 64 * 5]> = LazyLock::new(|| { + (0..64 * 5) + .map(|i| i as f32) + .collect::>() + .try_into() + .expect("f32 data incorrectly initialised") +}); +static F64_DATA: LazyLock<[f64; 32 * 5]> = LazyLock::new(|| { + (0..32 * 5) + .map(|i| i as f64) + .collect::>() + .try_into() + .expect("f64 data incorrectly initialised") +}); +static I8_DATA: LazyLock<[i8; 256 * 5]> = LazyLock::new(|| { + (0..256 * 5) + .map(|i| ((i + 128) % 256 - 128) as i8) + .collect::>() + .try_into() + .expect("i8 data incorrectly initialised") +}); +static I16_DATA: LazyLock<[i16; 128 * 5]> = LazyLock::new(|| { + (0..128 * 5) + .map(|i| i as i16) + .collect::>() + .try_into() + .expect("i16 data incorrectly initialised") +}); +static I32_DATA: LazyLock<[i32; 64 * 5]> = LazyLock::new(|| { + (0..64 * 5) + .map(|i| i as i32) + .collect::>() + .try_into() + .expect("i32 data incorrectly initialised") +}); +static I64_DATA: LazyLock<[i64; 32 * 5]> = LazyLock::new(|| { + (0..32 * 5) + .map(|i| i as i64) + .collect::>() + .try_into() + .expect("i64 data incorrectly initialised") +}); +static U8_DATA: LazyLock<[u8; 256 * 5]> = LazyLock::new(|| { + (0..256 * 5) + .map(|i| i as u8) + .collect::>() + .try_into() + .expect("u8 data incorrectly initialised") +}); +static U16_DATA: LazyLock<[u16; 128 * 5]> = LazyLock::new(|| { + (0..128 * 5) + .map(|i| i as u16) + .collect::>() + .try_into() + .expect("u16 data incorrectly initialised") +}); +static U32_DATA: LazyLock<[u32; 64 * 5]> = LazyLock::new(|| { + (0..64 * 5) + .map(|i| i as u32) + .collect::>() + .try_into() + .expect("u32 data incorrectly initialised") +}); +static U64_DATA: LazyLock<[u64; 32 * 5]> = LazyLock::new(|| { + (0..32 * 5) + .map(|i| i as u64) + .collect::>() + .try_into() + .expect("u64 data incorrectly initialised") +}); +#[target_feature(enable = "sve")] +fn assert_vector_matches_f32(vector: svfloat32_t, expected: svfloat32_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b32(), defined)); + let cmp = svcmpne_f32(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[target_feature(enable = "sve")] +fn assert_vector_matches_f64(vector: svfloat64_t, expected: svfloat64_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b64(), defined)); + let cmp = svcmpne_f64(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[target_feature(enable = "sve")] +fn assert_vector_matches_i8(vector: svint8_t, expected: svint8_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b8(), defined)); + let cmp = svcmpne_s8(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[target_feature(enable = "sve")] +fn assert_vector_matches_i16(vector: svint16_t, expected: svint16_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b16(), defined)); + let cmp = svcmpne_s16(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[target_feature(enable = "sve")] +fn assert_vector_matches_i32(vector: svint32_t, expected: svint32_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b32(), defined)); + let cmp = svcmpne_s32(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[target_feature(enable = "sve")] +fn assert_vector_matches_i64(vector: svint64_t, expected: svint64_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b64(), defined)); + let cmp = svcmpne_s64(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[target_feature(enable = "sve")] +fn assert_vector_matches_u8(vector: svuint8_t, expected: svuint8_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b8(), defined)); + let cmp = svcmpne_u8(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[target_feature(enable = "sve")] +fn assert_vector_matches_u16(vector: svuint16_t, expected: svuint16_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b16(), defined)); + let cmp = svcmpne_u16(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[target_feature(enable = "sve")] +fn assert_vector_matches_u32(vector: svuint32_t, expected: svuint32_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b32(), defined)); + let cmp = svcmpne_u32(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[target_feature(enable = "sve")] +fn assert_vector_matches_u64(vector: svuint64_t, expected: svuint64_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b64(), defined)); + let cmp = svcmpne_u64(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_f32_with_svst1_f32() { + let mut storage = [0 as f32; 320usize]; + let data = svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + svst1_f32(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = svld1_f32(svptrue_b32(), storage.as_ptr() as *const f32); + assert_vector_matches_f32( + loaded, + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_f64_with_svst1_f64() { + let mut storage = [0 as f64; 160usize]; + let data = svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + svst1_f64(svptrue_b64(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svld1_f64(svptrue_b64(), storage.as_ptr() as *const f64); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_s8_with_svst1_s8() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s8((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1_s8(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1_s8(svptrue_b8(), storage.as_ptr() as *const i8); + assert_vector_matches_i8( + loaded, + svindex_s8((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_s16_with_svst1_s16() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1_s16(svptrue_b16(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1_s16(svptrue_b16(), storage.as_ptr() as *const i16); + assert_vector_matches_i16( + loaded, + svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_s32_with_svst1_s32() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1_s32(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1_s32(svptrue_b32(), storage.as_ptr() as *const i32); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_s64_with_svst1_s64() { + let mut storage = [0 as i64; 160usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1_s64(svptrue_b64(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svld1_s64(svptrue_b64(), storage.as_ptr() as *const i64); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_u8_with_svst1_u8() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u8((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1_u8(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1_u8(svptrue_b8(), storage.as_ptr() as *const u8); + assert_vector_matches_u8( + loaded, + svindex_u8((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_u16_with_svst1_u16() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1_u16(svptrue_b16(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = svld1_u16(svptrue_b16(), storage.as_ptr() as *const u16); + assert_vector_matches_u16( + loaded, + svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_u32_with_svst1_u32() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1_u32(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svld1_u32(svptrue_b32(), storage.as_ptr() as *const u32); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_u64_with_svst1_u64() { + let mut storage = [0 as u64; 160usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1_u64(svptrue_b64(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svld1_u64(svptrue_b64(), storage.as_ptr() as *const u64); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_s32index_f32_with_svst1_scatter_s32index_f32() { + let mut storage = [0 as f32; 320usize]; + let data = svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let indices = svindex_s32(0, 1); + svst1_scatter_s32index_f32(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = svld1_gather_s32index_f32(svptrue_b32(), storage.as_ptr() as *const f32, indices); + assert_vector_matches_f32( + loaded, + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_s32index_s32_with_svst1_scatter_s32index_s32() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s32(0, 1); + svst1_scatter_s32index_s32(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1_gather_s32index_s32(svptrue_b32(), storage.as_ptr() as *const i32, indices); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_s32index_u32_with_svst1_scatter_s32index_u32() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s32(0, 1); + svst1_scatter_s32index_u32(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svld1_gather_s32index_u32(svptrue_b32(), storage.as_ptr() as *const u32, indices); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_s64index_f64_with_svst1_scatter_s64index_f64() { + let mut storage = [0 as f64; 160usize]; + let data = svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let indices = svindex_s64(0, 1); + svst1_scatter_s64index_f64(svptrue_b64(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svld1_gather_s64index_f64(svptrue_b64(), storage.as_ptr() as *const f64, indices); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_s64index_s64_with_svst1_scatter_s64index_s64() { + let mut storage = [0 as i64; 160usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s64(0, 1); + svst1_scatter_s64index_s64(svptrue_b64(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svld1_gather_s64index_s64(svptrue_b64(), storage.as_ptr() as *const i64, indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_s64index_u64_with_svst1_scatter_s64index_u64() { + let mut storage = [0 as u64; 160usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s64(0, 1); + svst1_scatter_s64index_u64(svptrue_b64(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svld1_gather_s64index_u64(svptrue_b64(), storage.as_ptr() as *const u64, indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u32index_f32_with_svst1_scatter_u32index_f32() { + let mut storage = [0 as f32; 320usize]; + let data = svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let indices = svindex_u32(0, 1); + svst1_scatter_u32index_f32(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = svld1_gather_u32index_f32(svptrue_b32(), storage.as_ptr() as *const f32, indices); + assert_vector_matches_f32( + loaded, + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u32index_s32_with_svst1_scatter_u32index_s32() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u32(0, 1); + svst1_scatter_u32index_s32(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1_gather_u32index_s32(svptrue_b32(), storage.as_ptr() as *const i32, indices); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u32index_u32_with_svst1_scatter_u32index_u32() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u32(0, 1); + svst1_scatter_u32index_u32(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svld1_gather_u32index_u32(svptrue_b32(), storage.as_ptr() as *const u32, indices); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u64index_f64_with_svst1_scatter_u64index_f64() { + let mut storage = [0 as f64; 160usize]; + let data = svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let indices = svindex_u64(0, 1); + svst1_scatter_u64index_f64(svptrue_b64(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svld1_gather_u64index_f64(svptrue_b64(), storage.as_ptr() as *const f64, indices); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u64index_s64_with_svst1_scatter_u64index_s64() { + let mut storage = [0 as i64; 160usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u64(0, 1); + svst1_scatter_u64index_s64(svptrue_b64(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svld1_gather_u64index_s64(svptrue_b64(), storage.as_ptr() as *const i64, indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u64index_u64_with_svst1_scatter_u64index_u64() { + let mut storage = [0 as u64; 160usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u64(0, 1); + svst1_scatter_u64index_u64(svptrue_b64(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svld1_gather_u64index_u64(svptrue_b64(), storage.as_ptr() as *const u64, indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_s32offset_f32_with_svst1_scatter_s32offset_f32() { + let mut storage = [0 as f32; 320usize]; + let data = svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let offsets = svindex_s32(0, 4u32.try_into().unwrap()); + svst1_scatter_s32offset_f32(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = svld1_gather_s32offset_f32(svptrue_b32(), storage.as_ptr() as *const f32, offsets); + assert_vector_matches_f32( + loaded, + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_s32offset_s32_with_svst1_scatter_s32offset_s32() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s32(0, 4u32.try_into().unwrap()); + svst1_scatter_s32offset_s32(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1_gather_s32offset_s32(svptrue_b32(), storage.as_ptr() as *const i32, offsets); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_s32offset_u32_with_svst1_scatter_s32offset_u32() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s32(0, 4u32.try_into().unwrap()); + svst1_scatter_s32offset_u32(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svld1_gather_s32offset_u32(svptrue_b32(), storage.as_ptr() as *const u32, offsets); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_s64offset_f64_with_svst1_scatter_s64offset_f64() { + let mut storage = [0 as f64; 160usize]; + let data = svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let offsets = svindex_s64(0, 8u32.try_into().unwrap()); + svst1_scatter_s64offset_f64(svptrue_b64(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svld1_gather_s64offset_f64(svptrue_b64(), storage.as_ptr() as *const f64, offsets); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_s64offset_s64_with_svst1_scatter_s64offset_s64() { + let mut storage = [0 as i64; 160usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 8u32.try_into().unwrap()); + svst1_scatter_s64offset_s64(svptrue_b64(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svld1_gather_s64offset_s64(svptrue_b64(), storage.as_ptr() as *const i64, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_s64offset_u64_with_svst1_scatter_s64offset_u64() { + let mut storage = [0 as u64; 160usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 8u32.try_into().unwrap()); + svst1_scatter_s64offset_u64(svptrue_b64(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svld1_gather_s64offset_u64(svptrue_b64(), storage.as_ptr() as *const u64, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u32offset_f32_with_svst1_scatter_u32offset_f32() { + let mut storage = [0 as f32; 320usize]; + let data = svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let offsets = svindex_u32(0, 4u32.try_into().unwrap()); + svst1_scatter_u32offset_f32(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = svld1_gather_u32offset_f32(svptrue_b32(), storage.as_ptr() as *const f32, offsets); + assert_vector_matches_f32( + loaded, + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u32offset_s32_with_svst1_scatter_u32offset_s32() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 4u32.try_into().unwrap()); + svst1_scatter_u32offset_s32(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1_gather_u32offset_s32(svptrue_b32(), storage.as_ptr() as *const i32, offsets); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u32offset_u32_with_svst1_scatter_u32offset_u32() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 4u32.try_into().unwrap()); + svst1_scatter_u32offset_u32(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svld1_gather_u32offset_u32(svptrue_b32(), storage.as_ptr() as *const u32, offsets); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u64offset_f64_with_svst1_scatter_u64offset_f64() { + let mut storage = [0 as f64; 160usize]; + let data = svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + svst1_scatter_u64offset_f64(svptrue_b64(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svld1_gather_u64offset_f64(svptrue_b64(), storage.as_ptr() as *const f64, offsets); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u64offset_s64_with_svst1_scatter_u64offset_s64() { + let mut storage = [0 as i64; 160usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + svst1_scatter_u64offset_s64(svptrue_b64(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svld1_gather_u64offset_s64(svptrue_b64(), storage.as_ptr() as *const i64, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u64offset_u64_with_svst1_scatter_u64offset_u64() { + let mut storage = [0 as u64; 160usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + svst1_scatter_u64offset_u64(svptrue_b64(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svld1_gather_u64offset_u64(svptrue_b64(), storage.as_ptr() as *const u64, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u64base_f64_with_svst1_scatter_u64base_f64() { + let mut storage = [0 as f64; 160usize]; + let data = svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svst1_scatter_u64base_f64(svptrue_b64(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svld1_gather_u64base_f64(svptrue_b64(), bases); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u64base_s64_with_svst1_scatter_u64base_s64() { + let mut storage = [0 as i64; 160usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svst1_scatter_u64base_s64(svptrue_b64(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svld1_gather_u64base_s64(svptrue_b64(), bases); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u64base_u64_with_svst1_scatter_u64base_u64() { + let mut storage = [0 as u64; 160usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svst1_scatter_u64base_u64(svptrue_b64(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svld1_gather_u64base_u64(svptrue_b64(), bases); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u32base_index_f32_with_svst1_scatter_u32base_index_f32() { + let mut storage = [0 as f32; 320usize]; + let data = svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let bases = svindex_u32(0, 4u32.try_into().unwrap()); + svst1_scatter_u32base_index_f32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 / (4u32 as i64) + 1, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = svld1_gather_u32base_index_f32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 / (4u32 as i64) + 1, + ); + assert_vector_matches_f32( + loaded, + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u32base_index_s32_with_svst1_scatter_u32base_index_s32() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 4u32.try_into().unwrap()); + svst1_scatter_u32base_index_s32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 / (4u32 as i64) + 1, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1_gather_u32base_index_s32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 / (4u32 as i64) + 1, + ); + assert_vector_matches_i32( + loaded, + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u32base_index_u32_with_svst1_scatter_u32base_index_u32() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 4u32.try_into().unwrap()); + svst1_scatter_u32base_index_u32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 / (4u32 as i64) + 1, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svld1_gather_u32base_index_u32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 / (4u32 as i64) + 1, + ); + assert_vector_matches_u32( + loaded, + svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u64base_index_f64_with_svst1_scatter_u64base_index_f64() { + let mut storage = [0 as f64; 160usize]; + let data = svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svst1_scatter_u64base_index_f64(svptrue_b64(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svld1_gather_u64base_index_f64(svptrue_b64(), bases, 1.try_into().unwrap()); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u64base_index_s64_with_svst1_scatter_u64base_index_s64() { + let mut storage = [0 as i64; 160usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svst1_scatter_u64base_index_s64(svptrue_b64(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svld1_gather_u64base_index_s64(svptrue_b64(), bases, 1.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u64base_index_u64_with_svst1_scatter_u64base_index_u64() { + let mut storage = [0 as u64; 160usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svst1_scatter_u64base_index_u64(svptrue_b64(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svld1_gather_u64base_index_u64(svptrue_b64(), bases, 1.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u32base_offset_f32_with_svst1_scatter_u32base_offset_f32() { + let mut storage = [0 as f32; 320usize]; + let data = svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let bases = svindex_u32(0, 4u32.try_into().unwrap()); + svst1_scatter_u32base_offset_f32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 + 4u32 as i64, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = svld1_gather_u32base_offset_f32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 + 4u32 as i64, + ); + assert_vector_matches_f32( + loaded, + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u32base_offset_s32_with_svst1_scatter_u32base_offset_s32() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 4u32.try_into().unwrap()); + svst1_scatter_u32base_offset_s32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 + 4u32 as i64, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1_gather_u32base_offset_s32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 + 4u32 as i64, + ); + assert_vector_matches_i32( + loaded, + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u32base_offset_u32_with_svst1_scatter_u32base_offset_u32() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 4u32.try_into().unwrap()); + svst1_scatter_u32base_offset_u32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 + 4u32 as i64, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svld1_gather_u32base_offset_u32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 + 4u32 as i64, + ); + assert_vector_matches_u32( + loaded, + svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u64base_offset_f64_with_svst1_scatter_u64base_offset_f64() { + let mut storage = [0 as f64; 160usize]; + let data = svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svst1_scatter_u64base_offset_f64(svptrue_b64(), bases, 8u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svld1_gather_u64base_offset_f64(svptrue_b64(), bases, 8u32.try_into().unwrap()); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u64base_offset_s64_with_svst1_scatter_u64base_offset_s64() { + let mut storage = [0 as i64; 160usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svst1_scatter_u64base_offset_s64(svptrue_b64(), bases, 8u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svld1_gather_u64base_offset_s64(svptrue_b64(), bases, 8u32.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_gather_u64base_offset_u64_with_svst1_scatter_u64base_offset_u64() { + let mut storage = [0 as u64; 160usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svst1_scatter_u64base_offset_u64(svptrue_b64(), bases, 8u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svld1_gather_u64base_offset_u64(svptrue_b64(), bases, 8u32.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_vnum_f32_with_svst1_vnum_f32() { + let len = svcntw() as usize; + let mut storage = [0 as f32; 320usize]; + let data = svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); + svst1_vnum_f32(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = svld1_vnum_f32(svptrue_b32(), storage.as_ptr() as *const f32, 1); + assert_vector_matches_f32( + loaded, + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_vnum_f64_with_svst1_vnum_f64() { + let len = svcntd() as usize; + let mut storage = [0 as f64; 160usize]; + let data = svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); + svst1_vnum_f64(svptrue_b64(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svld1_vnum_f64(svptrue_b64(), storage.as_ptr() as *const f64, 1); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_vnum_s8_with_svst1_vnum_s8() { + let len = svcntb() as usize; + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s8( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1_vnum_s8(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1_vnum_s8(svptrue_b8(), storage.as_ptr() as *const i8, 1); + assert_vector_matches_i8( + loaded, + svindex_s8( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_vnum_s16_with_svst1_vnum_s16() { + let len = svcnth() as usize; + let mut storage = [0 as i16; 640usize]; + let data = svindex_s16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1_vnum_s16(svptrue_b16(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1_vnum_s16(svptrue_b16(), storage.as_ptr() as *const i16, 1); + assert_vector_matches_i16( + loaded, + svindex_s16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_vnum_s32_with_svst1_vnum_s32() { + let len = svcntw() as usize; + let mut storage = [0 as i32; 320usize]; + let data = svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1_vnum_s32(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1_vnum_s32(svptrue_b32(), storage.as_ptr() as *const i32, 1); + assert_vector_matches_i32( + loaded, + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_vnum_s64_with_svst1_vnum_s64() { + let len = svcntd() as usize; + let mut storage = [0 as i64; 160usize]; + let data = svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1_vnum_s64(svptrue_b64(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svld1_vnum_s64(svptrue_b64(), storage.as_ptr() as *const i64, 1); + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_vnum_u8_with_svst1_vnum_u8() { + let len = svcntb() as usize; + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u8( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1_vnum_u8(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1_vnum_u8(svptrue_b8(), storage.as_ptr() as *const u8, 1); + assert_vector_matches_u8( + loaded, + svindex_u8( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_vnum_u16_with_svst1_vnum_u16() { + let len = svcnth() as usize; + let mut storage = [0 as u16; 640usize]; + let data = svindex_u16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1_vnum_u16(svptrue_b16(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = svld1_vnum_u16(svptrue_b16(), storage.as_ptr() as *const u16, 1); + assert_vector_matches_u16( + loaded, + svindex_u16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_vnum_u32_with_svst1_vnum_u32() { + let len = svcntw() as usize; + let mut storage = [0 as u32; 320usize]; + let data = svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1_vnum_u32(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svld1_vnum_u32(svptrue_b32(), storage.as_ptr() as *const u32, 1); + assert_vector_matches_u32( + loaded, + svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1_vnum_u64_with_svst1_vnum_u64() { + let len = svcntd() as usize; + let mut storage = [0 as u64; 160usize]; + let data = svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1_vnum_u64(svptrue_b64(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svld1_vnum_u64(svptrue_b64(), storage.as_ptr() as *const u64, 1); + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve,f64mm")] +unsafe fn test_svld1ro_f32() { + if svcntb() < 32 { + println!("Skipping test_svld1ro_f32 due to SVE vector length"); + return; + } + svsetffr(); + let loaded = svld1ro_f32(svptrue_b32(), F32_DATA.as_ptr()); + assert_vector_matches_f32( + loaded, + svtrn1q_f32( + svdupq_n_f32(0usize as f32, 1usize as f32, 2usize as f32, 3usize as f32), + svdupq_n_f32(4usize as f32, 5usize as f32, 6usize as f32, 7usize as f32), + ), + ); +} +#[simd_test(enable = "sve,f64mm")] +unsafe fn test_svld1ro_f64() { + if svcntb() < 32 { + println!("Skipping test_svld1ro_f64 due to SVE vector length"); + return; + } + svsetffr(); + let loaded = svld1ro_f64(svptrue_b64(), F64_DATA.as_ptr()); + assert_vector_matches_f64( + loaded, + svtrn1q_f64( + svdupq_n_f64(0usize as f64, 1usize as f64), + svdupq_n_f64(2usize as f64, 3usize as f64), + ), + ); +} +#[simd_test(enable = "sve,f64mm")] +unsafe fn test_svld1ro_s8() { + if svcntb() < 32 { + println!("Skipping test_svld1ro_s8 due to SVE vector length"); + return; + } + svsetffr(); + let loaded = svld1ro_s8(svptrue_b8(), I8_DATA.as_ptr()); + assert_vector_matches_i8( + loaded, + svtrn1q_s8( + svdupq_n_s8( + 0usize as i8, + 1usize as i8, + 2usize as i8, + 3usize as i8, + 4usize as i8, + 5usize as i8, + 6usize as i8, + 7usize as i8, + 8usize as i8, + 9usize as i8, + 10usize as i8, + 11usize as i8, + 12usize as i8, + 13usize as i8, + 14usize as i8, + 15usize as i8, + ), + svdupq_n_s8( + 16usize as i8, + 17usize as i8, + 18usize as i8, + 19usize as i8, + 20usize as i8, + 21usize as i8, + 22usize as i8, + 23usize as i8, + 24usize as i8, + 25usize as i8, + 26usize as i8, + 27usize as i8, + 28usize as i8, + 29usize as i8, + 30usize as i8, + 31usize as i8, + ), + ), + ); +} +#[simd_test(enable = "sve,f64mm")] +unsafe fn test_svld1ro_s16() { + if svcntb() < 32 { + println!("Skipping test_svld1ro_s16 due to SVE vector length"); + return; + } + svsetffr(); + let loaded = svld1ro_s16(svptrue_b16(), I16_DATA.as_ptr()); + assert_vector_matches_i16( + loaded, + svtrn1q_s16( + svdupq_n_s16( + 0usize as i16, + 1usize as i16, + 2usize as i16, + 3usize as i16, + 4usize as i16, + 5usize as i16, + 6usize as i16, + 7usize as i16, + ), + svdupq_n_s16( + 8usize as i16, + 9usize as i16, + 10usize as i16, + 11usize as i16, + 12usize as i16, + 13usize as i16, + 14usize as i16, + 15usize as i16, + ), + ), + ); +} +#[simd_test(enable = "sve,f64mm")] +unsafe fn test_svld1ro_s32() { + if svcntb() < 32 { + println!("Skipping test_svld1ro_s32 due to SVE vector length"); + return; + } + svsetffr(); + let loaded = svld1ro_s32(svptrue_b32(), I32_DATA.as_ptr()); + assert_vector_matches_i32( + loaded, + svtrn1q_s32( + svdupq_n_s32(0usize as i32, 1usize as i32, 2usize as i32, 3usize as i32), + svdupq_n_s32(4usize as i32, 5usize as i32, 6usize as i32, 7usize as i32), + ), + ); +} +#[simd_test(enable = "sve,f64mm")] +unsafe fn test_svld1ro_s64() { + if svcntb() < 32 { + println!("Skipping test_svld1ro_s64 due to SVE vector length"); + return; + } + svsetffr(); + let loaded = svld1ro_s64(svptrue_b64(), I64_DATA.as_ptr()); + assert_vector_matches_i64( + loaded, + svtrn1q_s64( + svdupq_n_s64(0usize as i64, 1usize as i64), + svdupq_n_s64(2usize as i64, 3usize as i64), + ), + ); +} +#[simd_test(enable = "sve,f64mm")] +unsafe fn test_svld1ro_u8() { + if svcntb() < 32 { + println!("Skipping test_svld1ro_u8 due to SVE vector length"); + return; + } + svsetffr(); + let loaded = svld1ro_u8(svptrue_b8(), U8_DATA.as_ptr()); + assert_vector_matches_u8( + loaded, + svtrn1q_u8( + svdupq_n_u8( + 0usize as u8, + 1usize as u8, + 2usize as u8, + 3usize as u8, + 4usize as u8, + 5usize as u8, + 6usize as u8, + 7usize as u8, + 8usize as u8, + 9usize as u8, + 10usize as u8, + 11usize as u8, + 12usize as u8, + 13usize as u8, + 14usize as u8, + 15usize as u8, + ), + svdupq_n_u8( + 16usize as u8, + 17usize as u8, + 18usize as u8, + 19usize as u8, + 20usize as u8, + 21usize as u8, + 22usize as u8, + 23usize as u8, + 24usize as u8, + 25usize as u8, + 26usize as u8, + 27usize as u8, + 28usize as u8, + 29usize as u8, + 30usize as u8, + 31usize as u8, + ), + ), + ); +} +#[simd_test(enable = "sve,f64mm")] +unsafe fn test_svld1ro_u16() { + if svcntb() < 32 { + println!("Skipping test_svld1ro_u16 due to SVE vector length"); + return; + } + svsetffr(); + let loaded = svld1ro_u16(svptrue_b16(), U16_DATA.as_ptr()); + assert_vector_matches_u16( + loaded, + svtrn1q_u16( + svdupq_n_u16( + 0usize as u16, + 1usize as u16, + 2usize as u16, + 3usize as u16, + 4usize as u16, + 5usize as u16, + 6usize as u16, + 7usize as u16, + ), + svdupq_n_u16( + 8usize as u16, + 9usize as u16, + 10usize as u16, + 11usize as u16, + 12usize as u16, + 13usize as u16, + 14usize as u16, + 15usize as u16, + ), + ), + ); +} +#[simd_test(enable = "sve,f64mm")] +unsafe fn test_svld1ro_u32() { + if svcntb() < 32 { + println!("Skipping test_svld1ro_u32 due to SVE vector length"); + return; + } + svsetffr(); + let loaded = svld1ro_u32(svptrue_b32(), U32_DATA.as_ptr()); + assert_vector_matches_u32( + loaded, + svtrn1q_u32( + svdupq_n_u32(0usize as u32, 1usize as u32, 2usize as u32, 3usize as u32), + svdupq_n_u32(4usize as u32, 5usize as u32, 6usize as u32, 7usize as u32), + ), + ); +} +#[simd_test(enable = "sve,f64mm")] +unsafe fn test_svld1ro_u64() { + if svcntb() < 32 { + println!("Skipping test_svld1ro_u64 due to SVE vector length"); + return; + } + svsetffr(); + let loaded = svld1ro_u64(svptrue_b64(), U64_DATA.as_ptr()); + assert_vector_matches_u64( + loaded, + svtrn1q_u64( + svdupq_n_u64(0usize as u64, 1usize as u64), + svdupq_n_u64(2usize as u64, 3usize as u64), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1rq_f32() { + svsetffr(); + let loaded = svld1rq_f32(svptrue_b32(), F32_DATA.as_ptr()); + assert_vector_matches_f32( + loaded, + svdupq_n_f32(0usize as f32, 1usize as f32, 2usize as f32, 3usize as f32), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1rq_f64() { + svsetffr(); + let loaded = svld1rq_f64(svptrue_b64(), F64_DATA.as_ptr()); + assert_vector_matches_f64(loaded, svdupq_n_f64(0usize as f64, 1usize as f64)); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1rq_s8() { + svsetffr(); + let loaded = svld1rq_s8(svptrue_b8(), I8_DATA.as_ptr()); + assert_vector_matches_i8( + loaded, + svdupq_n_s8( + 0usize as i8, + 1usize as i8, + 2usize as i8, + 3usize as i8, + 4usize as i8, + 5usize as i8, + 6usize as i8, + 7usize as i8, + 8usize as i8, + 9usize as i8, + 10usize as i8, + 11usize as i8, + 12usize as i8, + 13usize as i8, + 14usize as i8, + 15usize as i8, + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1rq_s16() { + svsetffr(); + let loaded = svld1rq_s16(svptrue_b16(), I16_DATA.as_ptr()); + assert_vector_matches_i16( + loaded, + svdupq_n_s16( + 0usize as i16, + 1usize as i16, + 2usize as i16, + 3usize as i16, + 4usize as i16, + 5usize as i16, + 6usize as i16, + 7usize as i16, + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1rq_s32() { + svsetffr(); + let loaded = svld1rq_s32(svptrue_b32(), I32_DATA.as_ptr()); + assert_vector_matches_i32( + loaded, + svdupq_n_s32(0usize as i32, 1usize as i32, 2usize as i32, 3usize as i32), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1rq_s64() { + svsetffr(); + let loaded = svld1rq_s64(svptrue_b64(), I64_DATA.as_ptr()); + assert_vector_matches_i64(loaded, svdupq_n_s64(0usize as i64, 1usize as i64)); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1rq_u8() { + svsetffr(); + let loaded = svld1rq_u8(svptrue_b8(), U8_DATA.as_ptr()); + assert_vector_matches_u8( + loaded, + svdupq_n_u8( + 0usize as u8, + 1usize as u8, + 2usize as u8, + 3usize as u8, + 4usize as u8, + 5usize as u8, + 6usize as u8, + 7usize as u8, + 8usize as u8, + 9usize as u8, + 10usize as u8, + 11usize as u8, + 12usize as u8, + 13usize as u8, + 14usize as u8, + 15usize as u8, + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1rq_u16() { + svsetffr(); + let loaded = svld1rq_u16(svptrue_b16(), U16_DATA.as_ptr()); + assert_vector_matches_u16( + loaded, + svdupq_n_u16( + 0usize as u16, + 1usize as u16, + 2usize as u16, + 3usize as u16, + 4usize as u16, + 5usize as u16, + 6usize as u16, + 7usize as u16, + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1rq_u32() { + svsetffr(); + let loaded = svld1rq_u32(svptrue_b32(), U32_DATA.as_ptr()); + assert_vector_matches_u32( + loaded, + svdupq_n_u32(0usize as u32, 1usize as u32, 2usize as u32, 3usize as u32), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1rq_u64() { + svsetffr(); + let loaded = svld1rq_u64(svptrue_b64(), U64_DATA.as_ptr()); + assert_vector_matches_u64(loaded, svdupq_n_u64(0usize as u64, 1usize as u64)); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_gather_s32offset_s32_with_svst1b_scatter_s32offset_s32() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s32(0, 1u32.try_into().unwrap()); + svst1b_scatter_s32offset_s32(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1sb_gather_s32offset_s32(svptrue_b8(), storage.as_ptr() as *const i8, offsets); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_s32offset_s32_with_svst1h_scatter_s32offset_s32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s32(0, 2u32.try_into().unwrap()); + svst1h_scatter_s32offset_s32(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svld1sh_gather_s32offset_s32(svptrue_b16(), storage.as_ptr() as *const i16, offsets); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_gather_s32offset_u32_with_svst1b_scatter_s32offset_u32() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s32(0, 1u32.try_into().unwrap()); + svst1b_scatter_s32offset_u32(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1sb_gather_s32offset_u32(svptrue_b8(), storage.as_ptr() as *const i8, offsets); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_s32offset_u32_with_svst1h_scatter_s32offset_u32() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s32(0, 2u32.try_into().unwrap()); + svst1h_scatter_s32offset_u32(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svld1sh_gather_s32offset_u32(svptrue_b16(), storage.as_ptr() as *const i16, offsets); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_gather_s64offset_s64_with_svst1b_scatter_s64offset_s64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 1u32.try_into().unwrap()); + svst1b_scatter_s64offset_s64(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1sb_gather_s64offset_s64(svptrue_b8(), storage.as_ptr() as *const i8, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_s64offset_s64_with_svst1h_scatter_s64offset_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 2u32.try_into().unwrap()); + svst1h_scatter_s64offset_s64(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svld1sh_gather_s64offset_s64(svptrue_b16(), storage.as_ptr() as *const i16, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sw_gather_s64offset_s64_with_svst1w_scatter_s64offset_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 4u32.try_into().unwrap()); + svst1w_scatter_s64offset_s64(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = + svld1sw_gather_s64offset_s64(svptrue_b32(), storage.as_ptr() as *const i32, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_gather_s64offset_u64_with_svst1b_scatter_s64offset_u64() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 1u32.try_into().unwrap()); + svst1b_scatter_s64offset_u64(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1sb_gather_s64offset_u64(svptrue_b8(), storage.as_ptr() as *const i8, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_s64offset_u64_with_svst1h_scatter_s64offset_u64() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 2u32.try_into().unwrap()); + svst1h_scatter_s64offset_u64(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svld1sh_gather_s64offset_u64(svptrue_b16(), storage.as_ptr() as *const i16, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sw_gather_s64offset_u64_with_svst1w_scatter_s64offset_u64() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 4u32.try_into().unwrap()); + svst1w_scatter_s64offset_u64(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = + svld1sw_gather_s64offset_u64(svptrue_b32(), storage.as_ptr() as *const i32, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_gather_u32offset_s32_with_svst1b_scatter_u32offset_s32() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 1u32.try_into().unwrap()); + svst1b_scatter_u32offset_s32(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1sb_gather_u32offset_s32(svptrue_b8(), storage.as_ptr() as *const i8, offsets); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_u32offset_s32_with_svst1h_scatter_u32offset_s32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 2u32.try_into().unwrap()); + svst1h_scatter_u32offset_s32(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svld1sh_gather_u32offset_s32(svptrue_b16(), storage.as_ptr() as *const i16, offsets); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_gather_u32offset_u32_with_svst1b_scatter_u32offset_u32() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 1u32.try_into().unwrap()); + svst1b_scatter_u32offset_u32(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1sb_gather_u32offset_u32(svptrue_b8(), storage.as_ptr() as *const i8, offsets); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_u32offset_u32_with_svst1h_scatter_u32offset_u32() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 2u32.try_into().unwrap()); + svst1h_scatter_u32offset_u32(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svld1sh_gather_u32offset_u32(svptrue_b16(), storage.as_ptr() as *const i16, offsets); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_gather_u64offset_s64_with_svst1b_scatter_u64offset_s64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + svst1b_scatter_u64offset_s64(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1sb_gather_u64offset_s64(svptrue_b8(), storage.as_ptr() as *const i8, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_u64offset_s64_with_svst1h_scatter_u64offset_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + svst1h_scatter_u64offset_s64(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svld1sh_gather_u64offset_s64(svptrue_b16(), storage.as_ptr() as *const i16, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sw_gather_u64offset_s64_with_svst1w_scatter_u64offset_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + svst1w_scatter_u64offset_s64(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = + svld1sw_gather_u64offset_s64(svptrue_b32(), storage.as_ptr() as *const i32, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_gather_u64offset_u64_with_svst1b_scatter_u64offset_u64() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + svst1b_scatter_u64offset_u64(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1sb_gather_u64offset_u64(svptrue_b8(), storage.as_ptr() as *const i8, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_u64offset_u64_with_svst1h_scatter_u64offset_u64() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + svst1h_scatter_u64offset_u64(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svld1sh_gather_u64offset_u64(svptrue_b16(), storage.as_ptr() as *const i16, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sw_gather_u64offset_u64_with_svst1w_scatter_u64offset_u64() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + svst1w_scatter_u64offset_u64(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = + svld1sw_gather_u64offset_u64(svptrue_b32(), storage.as_ptr() as *const i32, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_gather_u32base_offset_s32_with_svst1b_scatter_u32base_offset_s32() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 1u32.try_into().unwrap()); + svst1b_scatter_u32base_offset_s32( + svptrue_b8(), + bases, + storage.as_ptr() as i64 + 1u32 as i64, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1sb_gather_u32base_offset_s32( + svptrue_b8(), + bases, + storage.as_ptr() as i64 + 1u32 as i64, + ); + assert_vector_matches_i32( + loaded, + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_u32base_offset_s32_with_svst1h_scatter_u32base_offset_s32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 2u32.try_into().unwrap()); + svst1h_scatter_u32base_offset_s32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 + 2u32 as i64, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1sh_gather_u32base_offset_s32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 + 2u32 as i64, + ); + assert_vector_matches_i32( + loaded, + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_gather_u32base_offset_u32_with_svst1b_scatter_u32base_offset_u32() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 1u32.try_into().unwrap()); + svst1b_scatter_u32base_offset_u32( + svptrue_b8(), + bases, + storage.as_ptr() as i64 + 1u32 as i64, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1sb_gather_u32base_offset_u32( + svptrue_b8(), + bases, + storage.as_ptr() as i64 + 1u32 as i64, + ); + assert_vector_matches_u32( + loaded, + svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_u32base_offset_u32_with_svst1h_scatter_u32base_offset_u32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 2u32.try_into().unwrap()); + svst1h_scatter_u32base_offset_u32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 + 2u32 as i64, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1sh_gather_u32base_offset_u32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 + 2u32 as i64, + ); + assert_vector_matches_u32( + loaded, + svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_gather_u64base_offset_s64_with_svst1b_scatter_u64base_offset_s64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b8(), bases, offsets); + svst1b_scatter_u64base_offset_s64(svptrue_b8(), bases, 1u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1sb_gather_u64base_offset_s64(svptrue_b8(), bases, 1u32.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_u64base_offset_s64_with_svst1h_scatter_u64base_offset_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svst1h_scatter_u64base_offset_s64(svptrue_b16(), bases, 2u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1sh_gather_u64base_offset_s64(svptrue_b16(), bases, 2u32.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sw_gather_u64base_offset_s64_with_svst1w_scatter_u64base_offset_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svst1w_scatter_u64base_offset_s64(svptrue_b32(), bases, 4u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1sw_gather_u64base_offset_s64(svptrue_b32(), bases, 4u32.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_gather_u64base_offset_u64_with_svst1b_scatter_u64base_offset_u64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b8(), bases, offsets); + svst1b_scatter_u64base_offset_u64(svptrue_b8(), bases, 1u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1sb_gather_u64base_offset_u64(svptrue_b8(), bases, 1u32.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_u64base_offset_u64_with_svst1h_scatter_u64base_offset_u64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svst1h_scatter_u64base_offset_u64(svptrue_b16(), bases, 2u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1sh_gather_u64base_offset_u64(svptrue_b16(), bases, 2u32.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sw_gather_u64base_offset_u64_with_svst1w_scatter_u64base_offset_u64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svst1w_scatter_u64base_offset_u64(svptrue_b32(), bases, 4u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1sw_gather_u64base_offset_u64(svptrue_b32(), bases, 4u32.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_gather_u64base_s64_with_svst1b_scatter_u64base_s64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b8(), bases, offsets); + svst1b_scatter_u64base_s64(svptrue_b8(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1sb_gather_u64base_s64(svptrue_b8(), bases); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_u64base_s64_with_svst1h_scatter_u64base_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svst1h_scatter_u64base_s64(svptrue_b16(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1sh_gather_u64base_s64(svptrue_b16(), bases); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sw_gather_u64base_s64_with_svst1w_scatter_u64base_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svst1w_scatter_u64base_s64(svptrue_b32(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1sw_gather_u64base_s64(svptrue_b32(), bases); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_gather_u64base_u64_with_svst1b_scatter_u64base_u64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b8(), bases, offsets); + svst1b_scatter_u64base_u64(svptrue_b8(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1sb_gather_u64base_u64(svptrue_b8(), bases); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_u64base_u64_with_svst1h_scatter_u64base_u64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svst1h_scatter_u64base_u64(svptrue_b16(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1sh_gather_u64base_u64(svptrue_b16(), bases); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sw_gather_u64base_u64_with_svst1w_scatter_u64base_u64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svst1w_scatter_u64base_u64(svptrue_b32(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1sw_gather_u64base_u64(svptrue_b32(), bases); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_s16_with_svst1b_s16() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1b_s16(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1sb_s16(svptrue_b8(), storage.as_ptr() as *const i8); + assert_vector_matches_i16( + loaded, + svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_s32_with_svst1b_s32() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1b_s32(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1sb_s32(svptrue_b8(), storage.as_ptr() as *const i8); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_s32_with_svst1h_s32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1h_s32(svptrue_b16(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1sh_s32(svptrue_b16(), storage.as_ptr() as *const i16); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_s64_with_svst1b_s64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1b_s64(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1sb_s64(svptrue_b8(), storage.as_ptr() as *const i8); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_s64_with_svst1h_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1h_s64(svptrue_b16(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1sh_s64(svptrue_b16(), storage.as_ptr() as *const i16); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sw_s64_with_svst1w_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1w_s64(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1sw_s64(svptrue_b32(), storage.as_ptr() as *const i32); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_u16_with_svst1b_u16() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1b_u16(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1sb_u16(svptrue_b8(), storage.as_ptr() as *const i8); + assert_vector_matches_u16( + loaded, + svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_u32_with_svst1b_u32() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1b_u32(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1sb_u32(svptrue_b8(), storage.as_ptr() as *const i8); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_u32_with_svst1h_u32() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1h_u32(svptrue_b16(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = svld1sh_u32(svptrue_b16(), storage.as_ptr() as *const i16); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_u64_with_svst1b_u64() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1b_u64(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1sb_u64(svptrue_b8(), storage.as_ptr() as *const i8); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_u64_with_svst1h_u64() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1h_u64(svptrue_b16(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = svld1sh_u64(svptrue_b16(), storage.as_ptr() as *const i16); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sw_u64_with_svst1w_u64() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1w_u64(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svld1sw_u64(svptrue_b32(), storage.as_ptr() as *const i32); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_vnum_s16_with_svst1b_vnum_s16() { + let len = svcnth() as usize; + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1b_vnum_s16(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1sb_vnum_s16(svptrue_b8(), storage.as_ptr() as *const i8, 1); + assert_vector_matches_i16( + loaded, + svindex_s16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_vnum_s32_with_svst1b_vnum_s32() { + let len = svcntw() as usize; + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1b_vnum_s32(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1sb_vnum_s32(svptrue_b8(), storage.as_ptr() as *const i8, 1); + assert_vector_matches_i32( + loaded, + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_vnum_s32_with_svst1h_vnum_s32() { + let len = svcntw() as usize; + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1h_vnum_s32(svptrue_b16(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1sh_vnum_s32(svptrue_b16(), storage.as_ptr() as *const i16, 1); + assert_vector_matches_i32( + loaded, + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_vnum_s64_with_svst1b_vnum_s64() { + let len = svcntd() as usize; + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1b_vnum_s64(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1sb_vnum_s64(svptrue_b8(), storage.as_ptr() as *const i8, 1); + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_vnum_s64_with_svst1h_vnum_s64() { + let len = svcntd() as usize; + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1h_vnum_s64(svptrue_b16(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1sh_vnum_s64(svptrue_b16(), storage.as_ptr() as *const i16, 1); + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sw_vnum_s64_with_svst1w_vnum_s64() { + let len = svcntd() as usize; + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1w_vnum_s64(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1sw_vnum_s64(svptrue_b32(), storage.as_ptr() as *const i32, 1); + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_vnum_u16_with_svst1b_vnum_u16() { + let len = svcnth() as usize; + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1b_vnum_u16(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1sb_vnum_u16(svptrue_b8(), storage.as_ptr() as *const i8, 1); + assert_vector_matches_u16( + loaded, + svindex_u16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_vnum_u32_with_svst1b_vnum_u32() { + let len = svcntw() as usize; + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1b_vnum_u32(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1sb_vnum_u32(svptrue_b8(), storage.as_ptr() as *const i8, 1); + assert_vector_matches_u32( + loaded, + svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_vnum_u32_with_svst1h_vnum_u32() { + let len = svcntw() as usize; + let mut storage = [0 as u16; 640usize]; + let data = svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1h_vnum_u32(svptrue_b16(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = svld1sh_vnum_u32(svptrue_b16(), storage.as_ptr() as *const i16, 1); + assert_vector_matches_u32( + loaded, + svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sb_vnum_u64_with_svst1b_vnum_u64() { + let len = svcntd() as usize; + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1b_vnum_u64(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1sb_vnum_u64(svptrue_b8(), storage.as_ptr() as *const i8, 1); + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_vnum_u64_with_svst1h_vnum_u64() { + let len = svcntd() as usize; + let mut storage = [0 as u16; 640usize]; + let data = svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1h_vnum_u64(svptrue_b16(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = svld1sh_vnum_u64(svptrue_b16(), storage.as_ptr() as *const i16, 1); + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sw_vnum_u64_with_svst1w_vnum_u64() { + let len = svcntd() as usize; + let mut storage = [0 as u32; 320usize]; + let data = svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1w_vnum_u64(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svld1sw_vnum_u64(svptrue_b32(), storage.as_ptr() as *const i32, 1); + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_s32index_s32_with_svst1h_scatter_s32index_s32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s32(0, 1); + svst1h_scatter_s32index_s32(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svld1sh_gather_s32index_s32(svptrue_b16(), storage.as_ptr() as *const i16, indices); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_s32index_u32_with_svst1h_scatter_s32index_u32() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s32(0, 1); + svst1h_scatter_s32index_u32(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svld1sh_gather_s32index_u32(svptrue_b16(), storage.as_ptr() as *const i16, indices); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_s64index_s64_with_svst1h_scatter_s64index_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s64(0, 1); + svst1h_scatter_s64index_s64(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svld1sh_gather_s64index_s64(svptrue_b16(), storage.as_ptr() as *const i16, indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sw_gather_s64index_s64_with_svst1w_scatter_s64index_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s64(0, 1); + svst1w_scatter_s64index_s64(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = + svld1sw_gather_s64index_s64(svptrue_b32(), storage.as_ptr() as *const i32, indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_s64index_u64_with_svst1h_scatter_s64index_u64() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s64(0, 1); + svst1h_scatter_s64index_u64(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svld1sh_gather_s64index_u64(svptrue_b16(), storage.as_ptr() as *const i16, indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sw_gather_s64index_u64_with_svst1w_scatter_s64index_u64() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s64(0, 1); + svst1w_scatter_s64index_u64(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = + svld1sw_gather_s64index_u64(svptrue_b32(), storage.as_ptr() as *const i32, indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_u32index_s32_with_svst1h_scatter_u32index_s32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u32(0, 1); + svst1h_scatter_u32index_s32(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svld1sh_gather_u32index_s32(svptrue_b16(), storage.as_ptr() as *const i16, indices); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_u32index_u32_with_svst1h_scatter_u32index_u32() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u32(0, 1); + svst1h_scatter_u32index_u32(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svld1sh_gather_u32index_u32(svptrue_b16(), storage.as_ptr() as *const i16, indices); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_u64index_s64_with_svst1h_scatter_u64index_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u64(0, 1); + svst1h_scatter_u64index_s64(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svld1sh_gather_u64index_s64(svptrue_b16(), storage.as_ptr() as *const i16, indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sw_gather_u64index_s64_with_svst1w_scatter_u64index_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u64(0, 1); + svst1w_scatter_u64index_s64(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = + svld1sw_gather_u64index_s64(svptrue_b32(), storage.as_ptr() as *const i32, indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_u64index_u64_with_svst1h_scatter_u64index_u64() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u64(0, 1); + svst1h_scatter_u64index_u64(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svld1sh_gather_u64index_u64(svptrue_b16(), storage.as_ptr() as *const i16, indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sw_gather_u64index_u64_with_svst1w_scatter_u64index_u64() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u64(0, 1); + svst1w_scatter_u64index_u64(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = + svld1sw_gather_u64index_u64(svptrue_b32(), storage.as_ptr() as *const i32, indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_u32base_index_s32_with_svst1h_scatter_u32base_index_s32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 2u32.try_into().unwrap()); + svst1h_scatter_u32base_index_s32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 / (2u32 as i64) + 1, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1sh_gather_u32base_index_s32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 / (2u32 as i64) + 1, + ); + assert_vector_matches_i32( + loaded, + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_u32base_index_u32_with_svst1h_scatter_u32base_index_u32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 2u32.try_into().unwrap()); + svst1h_scatter_u32base_index_u32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 / (2u32 as i64) + 1, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1sh_gather_u32base_index_u32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 / (2u32 as i64) + 1, + ); + assert_vector_matches_u32( + loaded, + svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_u64base_index_s64_with_svst1h_scatter_u64base_index_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svst1h_scatter_u64base_index_s64(svptrue_b16(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1sh_gather_u64base_index_s64(svptrue_b16(), bases, 1.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sw_gather_u64base_index_s64_with_svst1w_scatter_u64base_index_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svst1w_scatter_u64base_index_s64(svptrue_b32(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1sw_gather_u64base_index_s64(svptrue_b32(), bases, 1.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sh_gather_u64base_index_u64_with_svst1h_scatter_u64base_index_u64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svst1h_scatter_u64base_index_u64(svptrue_b16(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1sh_gather_u64base_index_u64(svptrue_b16(), bases, 1.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1sw_gather_u64base_index_u64_with_svst1w_scatter_u64base_index_u64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svst1w_scatter_u64base_index_u64(svptrue_b32(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1sw_gather_u64base_index_u64(svptrue_b32(), bases, 1.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_gather_s32offset_s32_with_svst1b_scatter_s32offset_s32() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s32(0, 1u32.try_into().unwrap()); + svst1b_scatter_s32offset_s32(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1ub_gather_s32offset_s32(svptrue_b8(), storage.as_ptr() as *const u8, offsets); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_s32offset_s32_with_svst1h_scatter_s32offset_s32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s32(0, 2u32.try_into().unwrap()); + svst1h_scatter_s32offset_s32(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svld1uh_gather_s32offset_s32(svptrue_b16(), storage.as_ptr() as *const u16, offsets); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_gather_s32offset_u32_with_svst1b_scatter_s32offset_u32() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s32(0, 1u32.try_into().unwrap()); + svst1b_scatter_s32offset_u32(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1ub_gather_s32offset_u32(svptrue_b8(), storage.as_ptr() as *const u8, offsets); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_s32offset_u32_with_svst1h_scatter_s32offset_u32() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s32(0, 2u32.try_into().unwrap()); + svst1h_scatter_s32offset_u32(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svld1uh_gather_s32offset_u32(svptrue_b16(), storage.as_ptr() as *const u16, offsets); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_gather_s64offset_s64_with_svst1b_scatter_s64offset_s64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 1u32.try_into().unwrap()); + svst1b_scatter_s64offset_s64(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1ub_gather_s64offset_s64(svptrue_b8(), storage.as_ptr() as *const u8, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_s64offset_s64_with_svst1h_scatter_s64offset_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 2u32.try_into().unwrap()); + svst1h_scatter_s64offset_s64(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svld1uh_gather_s64offset_s64(svptrue_b16(), storage.as_ptr() as *const u16, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uw_gather_s64offset_s64_with_svst1w_scatter_s64offset_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 4u32.try_into().unwrap()); + svst1w_scatter_s64offset_s64(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = + svld1uw_gather_s64offset_s64(svptrue_b32(), storage.as_ptr() as *const u32, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_gather_s64offset_u64_with_svst1b_scatter_s64offset_u64() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 1u32.try_into().unwrap()); + svst1b_scatter_s64offset_u64(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1ub_gather_s64offset_u64(svptrue_b8(), storage.as_ptr() as *const u8, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_s64offset_u64_with_svst1h_scatter_s64offset_u64() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 2u32.try_into().unwrap()); + svst1h_scatter_s64offset_u64(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svld1uh_gather_s64offset_u64(svptrue_b16(), storage.as_ptr() as *const u16, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uw_gather_s64offset_u64_with_svst1w_scatter_s64offset_u64() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 4u32.try_into().unwrap()); + svst1w_scatter_s64offset_u64(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = + svld1uw_gather_s64offset_u64(svptrue_b32(), storage.as_ptr() as *const u32, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_gather_u32offset_s32_with_svst1b_scatter_u32offset_s32() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 1u32.try_into().unwrap()); + svst1b_scatter_u32offset_s32(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1ub_gather_u32offset_s32(svptrue_b8(), storage.as_ptr() as *const u8, offsets); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_u32offset_s32_with_svst1h_scatter_u32offset_s32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 2u32.try_into().unwrap()); + svst1h_scatter_u32offset_s32(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svld1uh_gather_u32offset_s32(svptrue_b16(), storage.as_ptr() as *const u16, offsets); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_gather_u32offset_u32_with_svst1b_scatter_u32offset_u32() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 1u32.try_into().unwrap()); + svst1b_scatter_u32offset_u32(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1ub_gather_u32offset_u32(svptrue_b8(), storage.as_ptr() as *const u8, offsets); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_u32offset_u32_with_svst1h_scatter_u32offset_u32() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 2u32.try_into().unwrap()); + svst1h_scatter_u32offset_u32(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svld1uh_gather_u32offset_u32(svptrue_b16(), storage.as_ptr() as *const u16, offsets); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_gather_u64offset_s64_with_svst1b_scatter_u64offset_s64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + svst1b_scatter_u64offset_s64(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1ub_gather_u64offset_s64(svptrue_b8(), storage.as_ptr() as *const u8, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_u64offset_s64_with_svst1h_scatter_u64offset_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + svst1h_scatter_u64offset_s64(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svld1uh_gather_u64offset_s64(svptrue_b16(), storage.as_ptr() as *const u16, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uw_gather_u64offset_s64_with_svst1w_scatter_u64offset_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + svst1w_scatter_u64offset_s64(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = + svld1uw_gather_u64offset_s64(svptrue_b32(), storage.as_ptr() as *const u32, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_gather_u64offset_u64_with_svst1b_scatter_u64offset_u64() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + svst1b_scatter_u64offset_u64(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1ub_gather_u64offset_u64(svptrue_b8(), storage.as_ptr() as *const u8, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_u64offset_u64_with_svst1h_scatter_u64offset_u64() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + svst1h_scatter_u64offset_u64(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svld1uh_gather_u64offset_u64(svptrue_b16(), storage.as_ptr() as *const u16, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uw_gather_u64offset_u64_with_svst1w_scatter_u64offset_u64() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + svst1w_scatter_u64offset_u64(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = + svld1uw_gather_u64offset_u64(svptrue_b32(), storage.as_ptr() as *const u32, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_gather_u32base_offset_s32_with_svst1b_scatter_u32base_offset_s32() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 1u32.try_into().unwrap()); + svst1b_scatter_u32base_offset_s32( + svptrue_b8(), + bases, + storage.as_ptr() as i64 + 1u32 as i64, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1ub_gather_u32base_offset_s32( + svptrue_b8(), + bases, + storage.as_ptr() as i64 + 1u32 as i64, + ); + assert_vector_matches_i32( + loaded, + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_u32base_offset_s32_with_svst1h_scatter_u32base_offset_s32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 2u32.try_into().unwrap()); + svst1h_scatter_u32base_offset_s32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 + 2u32 as i64, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1uh_gather_u32base_offset_s32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 + 2u32 as i64, + ); + assert_vector_matches_i32( + loaded, + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_gather_u32base_offset_u32_with_svst1b_scatter_u32base_offset_u32() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 1u32.try_into().unwrap()); + svst1b_scatter_u32base_offset_u32( + svptrue_b8(), + bases, + storage.as_ptr() as i64 + 1u32 as i64, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1ub_gather_u32base_offset_u32( + svptrue_b8(), + bases, + storage.as_ptr() as i64 + 1u32 as i64, + ); + assert_vector_matches_u32( + loaded, + svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_u32base_offset_u32_with_svst1h_scatter_u32base_offset_u32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 2u32.try_into().unwrap()); + svst1h_scatter_u32base_offset_u32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 + 2u32 as i64, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1uh_gather_u32base_offset_u32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 + 2u32 as i64, + ); + assert_vector_matches_u32( + loaded, + svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_gather_u64base_offset_s64_with_svst1b_scatter_u64base_offset_s64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b8(), bases, offsets); + svst1b_scatter_u64base_offset_s64(svptrue_b8(), bases, 1u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1ub_gather_u64base_offset_s64(svptrue_b8(), bases, 1u32.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_u64base_offset_s64_with_svst1h_scatter_u64base_offset_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svst1h_scatter_u64base_offset_s64(svptrue_b16(), bases, 2u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1uh_gather_u64base_offset_s64(svptrue_b16(), bases, 2u32.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uw_gather_u64base_offset_s64_with_svst1w_scatter_u64base_offset_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svst1w_scatter_u64base_offset_s64(svptrue_b32(), bases, 4u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1uw_gather_u64base_offset_s64(svptrue_b32(), bases, 4u32.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_gather_u64base_offset_u64_with_svst1b_scatter_u64base_offset_u64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b8(), bases, offsets); + svst1b_scatter_u64base_offset_u64(svptrue_b8(), bases, 1u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1ub_gather_u64base_offset_u64(svptrue_b8(), bases, 1u32.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_u64base_offset_u64_with_svst1h_scatter_u64base_offset_u64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svst1h_scatter_u64base_offset_u64(svptrue_b16(), bases, 2u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1uh_gather_u64base_offset_u64(svptrue_b16(), bases, 2u32.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uw_gather_u64base_offset_u64_with_svst1w_scatter_u64base_offset_u64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svst1w_scatter_u64base_offset_u64(svptrue_b32(), bases, 4u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1uw_gather_u64base_offset_u64(svptrue_b32(), bases, 4u32.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_gather_u64base_s64_with_svst1b_scatter_u64base_s64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b8(), bases, offsets); + svst1b_scatter_u64base_s64(svptrue_b8(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1ub_gather_u64base_s64(svptrue_b8(), bases); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_u64base_s64_with_svst1h_scatter_u64base_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svst1h_scatter_u64base_s64(svptrue_b16(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1uh_gather_u64base_s64(svptrue_b16(), bases); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uw_gather_u64base_s64_with_svst1w_scatter_u64base_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svst1w_scatter_u64base_s64(svptrue_b32(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1uw_gather_u64base_s64(svptrue_b32(), bases); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_gather_u64base_u64_with_svst1b_scatter_u64base_u64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b8(), bases, offsets); + svst1b_scatter_u64base_u64(svptrue_b8(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1ub_gather_u64base_u64(svptrue_b8(), bases); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_u64base_u64_with_svst1h_scatter_u64base_u64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svst1h_scatter_u64base_u64(svptrue_b16(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1uh_gather_u64base_u64(svptrue_b16(), bases); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uw_gather_u64base_u64_with_svst1w_scatter_u64base_u64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svst1w_scatter_u64base_u64(svptrue_b32(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1uw_gather_u64base_u64(svptrue_b32(), bases); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_s16_with_svst1b_s16() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1b_s16(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1ub_s16(svptrue_b8(), storage.as_ptr() as *const u8); + assert_vector_matches_i16( + loaded, + svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_s32_with_svst1b_s32() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1b_s32(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1ub_s32(svptrue_b8(), storage.as_ptr() as *const u8); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_s32_with_svst1h_s32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1h_s32(svptrue_b16(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1uh_s32(svptrue_b16(), storage.as_ptr() as *const u16); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_s64_with_svst1b_s64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1b_s64(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1ub_s64(svptrue_b8(), storage.as_ptr() as *const u8); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_s64_with_svst1h_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1h_s64(svptrue_b16(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1uh_s64(svptrue_b16(), storage.as_ptr() as *const u16); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uw_s64_with_svst1w_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1w_s64(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1uw_s64(svptrue_b32(), storage.as_ptr() as *const u32); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_u16_with_svst1b_u16() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1b_u16(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1ub_u16(svptrue_b8(), storage.as_ptr() as *const u8); + assert_vector_matches_u16( + loaded, + svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_u32_with_svst1b_u32() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1b_u32(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1ub_u32(svptrue_b8(), storage.as_ptr() as *const u8); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_u32_with_svst1h_u32() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1h_u32(svptrue_b16(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = svld1uh_u32(svptrue_b16(), storage.as_ptr() as *const u16); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_u64_with_svst1b_u64() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1b_u64(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1ub_u64(svptrue_b8(), storage.as_ptr() as *const u8); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_u64_with_svst1h_u64() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1h_u64(svptrue_b16(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = svld1uh_u64(svptrue_b16(), storage.as_ptr() as *const u16); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uw_u64_with_svst1w_u64() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svst1w_u64(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svld1uw_u64(svptrue_b32(), storage.as_ptr() as *const u32); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_vnum_s16_with_svst1b_vnum_s16() { + let len = svcnth() as usize; + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1b_vnum_s16(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1ub_vnum_s16(svptrue_b8(), storage.as_ptr() as *const u8, 1); + assert_vector_matches_i16( + loaded, + svindex_s16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_vnum_s32_with_svst1b_vnum_s32() { + let len = svcntw() as usize; + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1b_vnum_s32(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1ub_vnum_s32(svptrue_b8(), storage.as_ptr() as *const u8, 1); + assert_vector_matches_i32( + loaded, + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_vnum_s32_with_svst1h_vnum_s32() { + let len = svcntw() as usize; + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1h_vnum_s32(svptrue_b16(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1uh_vnum_s32(svptrue_b16(), storage.as_ptr() as *const u16, 1); + assert_vector_matches_i32( + loaded, + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_vnum_s64_with_svst1b_vnum_s64() { + let len = svcntd() as usize; + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1b_vnum_s64(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld1ub_vnum_s64(svptrue_b8(), storage.as_ptr() as *const u8, 1); + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_vnum_s64_with_svst1h_vnum_s64() { + let len = svcntd() as usize; + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1h_vnum_s64(svptrue_b16(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1uh_vnum_s64(svptrue_b16(), storage.as_ptr() as *const u16, 1); + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uw_vnum_s64_with_svst1w_vnum_s64() { + let len = svcntd() as usize; + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1w_vnum_s64(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1uw_vnum_s64(svptrue_b32(), storage.as_ptr() as *const u32, 1); + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_vnum_u16_with_svst1b_vnum_u16() { + let len = svcnth() as usize; + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1b_vnum_u16(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1ub_vnum_u16(svptrue_b8(), storage.as_ptr() as *const u8, 1); + assert_vector_matches_u16( + loaded, + svindex_u16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_vnum_u32_with_svst1b_vnum_u32() { + let len = svcntw() as usize; + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1b_vnum_u32(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1ub_vnum_u32(svptrue_b8(), storage.as_ptr() as *const u8, 1); + assert_vector_matches_u32( + loaded, + svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_vnum_u32_with_svst1h_vnum_u32() { + let len = svcntw() as usize; + let mut storage = [0 as u16; 640usize]; + let data = svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1h_vnum_u32(svptrue_b16(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = svld1uh_vnum_u32(svptrue_b16(), storage.as_ptr() as *const u16, 1); + assert_vector_matches_u32( + loaded, + svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1ub_vnum_u64_with_svst1b_vnum_u64() { + let len = svcntd() as usize; + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1b_vnum_u64(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld1ub_vnum_u64(svptrue_b8(), storage.as_ptr() as *const u8, 1); + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_vnum_u64_with_svst1h_vnum_u64() { + let len = svcntd() as usize; + let mut storage = [0 as u16; 640usize]; + let data = svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1h_vnum_u64(svptrue_b16(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = svld1uh_vnum_u64(svptrue_b16(), storage.as_ptr() as *const u16, 1); + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uw_vnum_u64_with_svst1w_vnum_u64() { + let len = svcntd() as usize; + let mut storage = [0 as u32; 320usize]; + let data = svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svst1w_vnum_u64(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svld1uw_vnum_u64(svptrue_b32(), storage.as_ptr() as *const u32, 1); + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_s32index_s32_with_svst1h_scatter_s32index_s32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s32(0, 1); + svst1h_scatter_s32index_s32(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svld1uh_gather_s32index_s32(svptrue_b16(), storage.as_ptr() as *const u16, indices); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_s32index_u32_with_svst1h_scatter_s32index_u32() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s32(0, 1); + svst1h_scatter_s32index_u32(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svld1uh_gather_s32index_u32(svptrue_b16(), storage.as_ptr() as *const u16, indices); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_s64index_s64_with_svst1h_scatter_s64index_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s64(0, 1); + svst1h_scatter_s64index_s64(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svld1uh_gather_s64index_s64(svptrue_b16(), storage.as_ptr() as *const u16, indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uw_gather_s64index_s64_with_svst1w_scatter_s64index_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s64(0, 1); + svst1w_scatter_s64index_s64(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = + svld1uw_gather_s64index_s64(svptrue_b32(), storage.as_ptr() as *const u32, indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_s64index_u64_with_svst1h_scatter_s64index_u64() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s64(0, 1); + svst1h_scatter_s64index_u64(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svld1uh_gather_s64index_u64(svptrue_b16(), storage.as_ptr() as *const u16, indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uw_gather_s64index_u64_with_svst1w_scatter_s64index_u64() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s64(0, 1); + svst1w_scatter_s64index_u64(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = + svld1uw_gather_s64index_u64(svptrue_b32(), storage.as_ptr() as *const u32, indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_u32index_s32_with_svst1h_scatter_u32index_s32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u32(0, 1); + svst1h_scatter_u32index_s32(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svld1uh_gather_u32index_s32(svptrue_b16(), storage.as_ptr() as *const u16, indices); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_u32index_u32_with_svst1h_scatter_u32index_u32() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u32(0, 1); + svst1h_scatter_u32index_u32(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svld1uh_gather_u32index_u32(svptrue_b16(), storage.as_ptr() as *const u16, indices); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_u64index_s64_with_svst1h_scatter_u64index_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u64(0, 1); + svst1h_scatter_u64index_s64(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svld1uh_gather_u64index_s64(svptrue_b16(), storage.as_ptr() as *const u16, indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uw_gather_u64index_s64_with_svst1w_scatter_u64index_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u64(0, 1); + svst1w_scatter_u64index_s64(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = + svld1uw_gather_u64index_s64(svptrue_b32(), storage.as_ptr() as *const u32, indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_u64index_u64_with_svst1h_scatter_u64index_u64() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u64(0, 1); + svst1h_scatter_u64index_u64(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svld1uh_gather_u64index_u64(svptrue_b16(), storage.as_ptr() as *const u16, indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uw_gather_u64index_u64_with_svst1w_scatter_u64index_u64() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u64(0, 1); + svst1w_scatter_u64index_u64(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = + svld1uw_gather_u64index_u64(svptrue_b32(), storage.as_ptr() as *const u32, indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_u32base_index_s32_with_svst1h_scatter_u32base_index_s32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 2u32.try_into().unwrap()); + svst1h_scatter_u32base_index_s32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 / (2u32 as i64) + 1, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1uh_gather_u32base_index_s32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 / (2u32 as i64) + 1, + ); + assert_vector_matches_i32( + loaded, + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_u32base_index_u32_with_svst1h_scatter_u32base_index_u32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 2u32.try_into().unwrap()); + svst1h_scatter_u32base_index_u32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 / (2u32 as i64) + 1, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1uh_gather_u32base_index_u32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 / (2u32 as i64) + 1, + ); + assert_vector_matches_u32( + loaded, + svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_u64base_index_s64_with_svst1h_scatter_u64base_index_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svst1h_scatter_u64base_index_s64(svptrue_b16(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1uh_gather_u64base_index_s64(svptrue_b16(), bases, 1.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uw_gather_u64base_index_s64_with_svst1w_scatter_u64base_index_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svst1w_scatter_u64base_index_s64(svptrue_b32(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1uw_gather_u64base_index_s64(svptrue_b32(), bases, 1.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uh_gather_u64base_index_u64_with_svst1h_scatter_u64base_index_u64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svst1h_scatter_u64base_index_u64(svptrue_b16(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld1uh_gather_u64base_index_u64(svptrue_b16(), bases, 1.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld1uw_gather_u64base_index_u64_with_svst1w_scatter_u64base_index_u64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svst1w_scatter_u64base_index_u64(svptrue_b32(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld1uw_gather_u64base_index_u64(svptrue_b32(), bases, 1.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_f32_with_svst2_f32() { + let mut storage = [0 as f32; 320usize]; + let data = svcreate2_f32( + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ), + ); + svst2_f32(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = svld2_f32(svptrue_b32(), storage.as_ptr() as *const f32); + assert_vector_matches_f32( + svget2_f32::<{ 0usize as i32 }>(loaded), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ), + ); + assert_vector_matches_f32( + svget2_f32::<{ 1usize as i32 }>(loaded), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_f64_with_svst2_f64() { + let mut storage = [0 as f64; 160usize]; + let data = svcreate2_f64( + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ), + ); + svst2_f64(svptrue_b64(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svld2_f64(svptrue_b64(), storage.as_ptr() as *const f64); + assert_vector_matches_f64( + svget2_f64::<{ 0usize as i32 }>(loaded), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ), + ); + assert_vector_matches_f64( + svget2_f64::<{ 1usize as i32 }>(loaded), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_s8_with_svst2_s8() { + let mut storage = [0 as i8; 1280usize]; + let data = svcreate2_s8( + svindex_s8((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + svindex_s8((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); + svst2_s8(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld2_s8(svptrue_b8(), storage.as_ptr() as *const i8); + assert_vector_matches_i8( + svget2_s8::<{ 0usize as i32 }>(loaded), + svindex_s8((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); + assert_vector_matches_i8( + svget2_s8::<{ 1usize as i32 }>(loaded), + svindex_s8((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_s16_with_svst2_s16() { + let mut storage = [0 as i16; 640usize]; + let data = svcreate2_s16( + svindex_s16((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + svindex_s16((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); + svst2_s16(svptrue_b16(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld2_s16(svptrue_b16(), storage.as_ptr() as *const i16); + assert_vector_matches_i16( + svget2_s16::<{ 0usize as i32 }>(loaded), + svindex_s16((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); + assert_vector_matches_i16( + svget2_s16::<{ 1usize as i32 }>(loaded), + svindex_s16((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_s32_with_svst2_s32() { + let mut storage = [0 as i32; 320usize]; + let data = svcreate2_s32( + svindex_s32((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + svindex_s32((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); + svst2_s32(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld2_s32(svptrue_b32(), storage.as_ptr() as *const i32); + assert_vector_matches_i32( + svget2_s32::<{ 0usize as i32 }>(loaded), + svindex_s32((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); + assert_vector_matches_i32( + svget2_s32::<{ 1usize as i32 }>(loaded), + svindex_s32((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_s64_with_svst2_s64() { + let mut storage = [0 as i64; 160usize]; + let data = svcreate2_s64( + svindex_s64((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + svindex_s64((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); + svst2_s64(svptrue_b64(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svld2_s64(svptrue_b64(), storage.as_ptr() as *const i64); + assert_vector_matches_i64( + svget2_s64::<{ 0usize as i32 }>(loaded), + svindex_s64((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); + assert_vector_matches_i64( + svget2_s64::<{ 1usize as i32 }>(loaded), + svindex_s64((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_u8_with_svst2_u8() { + let mut storage = [0 as u8; 1280usize]; + let data = svcreate2_u8( + svindex_u8((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + svindex_u8((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); + svst2_u8(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld2_u8(svptrue_b8(), storage.as_ptr() as *const u8); + assert_vector_matches_u8( + svget2_u8::<{ 0usize as i32 }>(loaded), + svindex_u8((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); + assert_vector_matches_u8( + svget2_u8::<{ 1usize as i32 }>(loaded), + svindex_u8((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_u16_with_svst2_u16() { + let mut storage = [0 as u16; 640usize]; + let data = svcreate2_u16( + svindex_u16((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + svindex_u16((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); + svst2_u16(svptrue_b16(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = svld2_u16(svptrue_b16(), storage.as_ptr() as *const u16); + assert_vector_matches_u16( + svget2_u16::<{ 0usize as i32 }>(loaded), + svindex_u16((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); + assert_vector_matches_u16( + svget2_u16::<{ 1usize as i32 }>(loaded), + svindex_u16((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_u32_with_svst2_u32() { + let mut storage = [0 as u32; 320usize]; + let data = svcreate2_u32( + svindex_u32((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + svindex_u32((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); + svst2_u32(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svld2_u32(svptrue_b32(), storage.as_ptr() as *const u32); + assert_vector_matches_u32( + svget2_u32::<{ 0usize as i32 }>(loaded), + svindex_u32((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); + assert_vector_matches_u32( + svget2_u32::<{ 1usize as i32 }>(loaded), + svindex_u32((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_u64_with_svst2_u64() { + let mut storage = [0 as u64; 160usize]; + let data = svcreate2_u64( + svindex_u64((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + svindex_u64((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); + svst2_u64(svptrue_b64(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svld2_u64(svptrue_b64(), storage.as_ptr() as *const u64); + assert_vector_matches_u64( + svget2_u64::<{ 0usize as i32 }>(loaded), + svindex_u64((0usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); + assert_vector_matches_u64( + svget2_u64::<{ 1usize as i32 }>(loaded), + svindex_u64((1usize).try_into().unwrap(), 2usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_vnum_f32_with_svst2_vnum_f32() { + let len = svcntw() as usize; + let mut storage = [0 as f32; 320usize]; + let data = svcreate2_f32( + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ), + ); + svst2_vnum_f32(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = svld2_vnum_f32(svptrue_b32(), storage.as_ptr() as *const f32, 1); + assert_vector_matches_f32( + svget2_f32::<{ 0usize as i32 }>(loaded), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ), + ); + assert_vector_matches_f32( + svget2_f32::<{ 1usize as i32 }>(loaded), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_vnum_f64_with_svst2_vnum_f64() { + let len = svcntd() as usize; + let mut storage = [0 as f64; 160usize]; + let data = svcreate2_f64( + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ), + ); + svst2_vnum_f64(svptrue_b64(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svld2_vnum_f64(svptrue_b64(), storage.as_ptr() as *const f64, 1); + assert_vector_matches_f64( + svget2_f64::<{ 0usize as i32 }>(loaded), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ), + ); + assert_vector_matches_f64( + svget2_f64::<{ 1usize as i32 }>(loaded), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_vnum_s8_with_svst2_vnum_s8() { + let len = svcntb() as usize; + let mut storage = [0 as i8; 1280usize]; + let data = svcreate2_s8( + svindex_s8( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + svindex_s8( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); + svst2_vnum_s8(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld2_vnum_s8(svptrue_b8(), storage.as_ptr() as *const i8, 1); + assert_vector_matches_i8( + svget2_s8::<{ 0usize as i32 }>(loaded), + svindex_s8( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i8( + svget2_s8::<{ 1usize as i32 }>(loaded), + svindex_s8( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_vnum_s16_with_svst2_vnum_s16() { + let len = svcnth() as usize; + let mut storage = [0 as i16; 640usize]; + let data = svcreate2_s16( + svindex_s16( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + svindex_s16( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); + svst2_vnum_s16(svptrue_b16(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld2_vnum_s16(svptrue_b16(), storage.as_ptr() as *const i16, 1); + assert_vector_matches_i16( + svget2_s16::<{ 0usize as i32 }>(loaded), + svindex_s16( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i16( + svget2_s16::<{ 1usize as i32 }>(loaded), + svindex_s16( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_vnum_s32_with_svst2_vnum_s32() { + let len = svcntw() as usize; + let mut storage = [0 as i32; 320usize]; + let data = svcreate2_s32( + svindex_s32( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + svindex_s32( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); + svst2_vnum_s32(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld2_vnum_s32(svptrue_b32(), storage.as_ptr() as *const i32, 1); + assert_vector_matches_i32( + svget2_s32::<{ 0usize as i32 }>(loaded), + svindex_s32( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i32( + svget2_s32::<{ 1usize as i32 }>(loaded), + svindex_s32( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_vnum_s64_with_svst2_vnum_s64() { + let len = svcntd() as usize; + let mut storage = [0 as i64; 160usize]; + let data = svcreate2_s64( + svindex_s64( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + svindex_s64( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); + svst2_vnum_s64(svptrue_b64(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svld2_vnum_s64(svptrue_b64(), storage.as_ptr() as *const i64, 1); + assert_vector_matches_i64( + svget2_s64::<{ 0usize as i32 }>(loaded), + svindex_s64( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i64( + svget2_s64::<{ 1usize as i32 }>(loaded), + svindex_s64( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_vnum_u8_with_svst2_vnum_u8() { + let len = svcntb() as usize; + let mut storage = [0 as u8; 1280usize]; + let data = svcreate2_u8( + svindex_u8( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + svindex_u8( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); + svst2_vnum_u8(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld2_vnum_u8(svptrue_b8(), storage.as_ptr() as *const u8, 1); + assert_vector_matches_u8( + svget2_u8::<{ 0usize as i32 }>(loaded), + svindex_u8( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u8( + svget2_u8::<{ 1usize as i32 }>(loaded), + svindex_u8( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_vnum_u16_with_svst2_vnum_u16() { + let len = svcnth() as usize; + let mut storage = [0 as u16; 640usize]; + let data = svcreate2_u16( + svindex_u16( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + svindex_u16( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); + svst2_vnum_u16(svptrue_b16(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = svld2_vnum_u16(svptrue_b16(), storage.as_ptr() as *const u16, 1); + assert_vector_matches_u16( + svget2_u16::<{ 0usize as i32 }>(loaded), + svindex_u16( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u16( + svget2_u16::<{ 1usize as i32 }>(loaded), + svindex_u16( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_vnum_u32_with_svst2_vnum_u32() { + let len = svcntw() as usize; + let mut storage = [0 as u32; 320usize]; + let data = svcreate2_u32( + svindex_u32( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + svindex_u32( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); + svst2_vnum_u32(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svld2_vnum_u32(svptrue_b32(), storage.as_ptr() as *const u32, 1); + assert_vector_matches_u32( + svget2_u32::<{ 0usize as i32 }>(loaded), + svindex_u32( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u32( + svget2_u32::<{ 1usize as i32 }>(loaded), + svindex_u32( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld2_vnum_u64_with_svst2_vnum_u64() { + let len = svcntd() as usize; + let mut storage = [0 as u64; 160usize]; + let data = svcreate2_u64( + svindex_u64( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + svindex_u64( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); + svst2_vnum_u64(svptrue_b64(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svld2_vnum_u64(svptrue_b64(), storage.as_ptr() as *const u64, 1); + assert_vector_matches_u64( + svget2_u64::<{ 0usize as i32 }>(loaded), + svindex_u64( + (len + 0usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u64( + svget2_u64::<{ 1usize as i32 }>(loaded), + svindex_u64( + (len + 1usize).try_into().unwrap(), + 2usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_f32_with_svst3_f32() { + let mut storage = [0 as f32; 320usize]; + let data = svcreate3_f32( + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ), + ); + svst3_f32(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = svld3_f32(svptrue_b32(), storage.as_ptr() as *const f32); + assert_vector_matches_f32( + svget3_f32::<{ 0usize as i32 }>(loaded), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ), + ); + assert_vector_matches_f32( + svget3_f32::<{ 1usize as i32 }>(loaded), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ), + ); + assert_vector_matches_f32( + svget3_f32::<{ 2usize as i32 }>(loaded), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_f64_with_svst3_f64() { + let mut storage = [0 as f64; 160usize]; + let data = svcreate3_f64( + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ), + ); + svst3_f64(svptrue_b64(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svld3_f64(svptrue_b64(), storage.as_ptr() as *const f64); + assert_vector_matches_f64( + svget3_f64::<{ 0usize as i32 }>(loaded), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ), + ); + assert_vector_matches_f64( + svget3_f64::<{ 1usize as i32 }>(loaded), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ), + ); + assert_vector_matches_f64( + svget3_f64::<{ 2usize as i32 }>(loaded), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_s8_with_svst3_s8() { + let mut storage = [0 as i8; 1280usize]; + let data = svcreate3_s8( + svindex_s8((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + svindex_s8((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + svindex_s8((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + svst3_s8(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld3_s8(svptrue_b8(), storage.as_ptr() as *const i8); + assert_vector_matches_i8( + svget3_s8::<{ 0usize as i32 }>(loaded), + svindex_s8((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + assert_vector_matches_i8( + svget3_s8::<{ 1usize as i32 }>(loaded), + svindex_s8((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + assert_vector_matches_i8( + svget3_s8::<{ 2usize as i32 }>(loaded), + svindex_s8((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_s16_with_svst3_s16() { + let mut storage = [0 as i16; 640usize]; + let data = svcreate3_s16( + svindex_s16((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + svindex_s16((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + svindex_s16((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + svst3_s16(svptrue_b16(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld3_s16(svptrue_b16(), storage.as_ptr() as *const i16); + assert_vector_matches_i16( + svget3_s16::<{ 0usize as i32 }>(loaded), + svindex_s16((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + assert_vector_matches_i16( + svget3_s16::<{ 1usize as i32 }>(loaded), + svindex_s16((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + assert_vector_matches_i16( + svget3_s16::<{ 2usize as i32 }>(loaded), + svindex_s16((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_s32_with_svst3_s32() { + let mut storage = [0 as i32; 320usize]; + let data = svcreate3_s32( + svindex_s32((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + svindex_s32((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + svindex_s32((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + svst3_s32(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld3_s32(svptrue_b32(), storage.as_ptr() as *const i32); + assert_vector_matches_i32( + svget3_s32::<{ 0usize as i32 }>(loaded), + svindex_s32((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + assert_vector_matches_i32( + svget3_s32::<{ 1usize as i32 }>(loaded), + svindex_s32((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + assert_vector_matches_i32( + svget3_s32::<{ 2usize as i32 }>(loaded), + svindex_s32((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_s64_with_svst3_s64() { + let mut storage = [0 as i64; 160usize]; + let data = svcreate3_s64( + svindex_s64((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + svindex_s64((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + svindex_s64((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + svst3_s64(svptrue_b64(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svld3_s64(svptrue_b64(), storage.as_ptr() as *const i64); + assert_vector_matches_i64( + svget3_s64::<{ 0usize as i32 }>(loaded), + svindex_s64((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + assert_vector_matches_i64( + svget3_s64::<{ 1usize as i32 }>(loaded), + svindex_s64((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + assert_vector_matches_i64( + svget3_s64::<{ 2usize as i32 }>(loaded), + svindex_s64((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_u8_with_svst3_u8() { + let mut storage = [0 as u8; 1280usize]; + let data = svcreate3_u8( + svindex_u8((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + svindex_u8((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + svindex_u8((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + svst3_u8(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld3_u8(svptrue_b8(), storage.as_ptr() as *const u8); + assert_vector_matches_u8( + svget3_u8::<{ 0usize as i32 }>(loaded), + svindex_u8((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + assert_vector_matches_u8( + svget3_u8::<{ 1usize as i32 }>(loaded), + svindex_u8((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + assert_vector_matches_u8( + svget3_u8::<{ 2usize as i32 }>(loaded), + svindex_u8((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_u16_with_svst3_u16() { + let mut storage = [0 as u16; 640usize]; + let data = svcreate3_u16( + svindex_u16((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + svindex_u16((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + svindex_u16((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + svst3_u16(svptrue_b16(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = svld3_u16(svptrue_b16(), storage.as_ptr() as *const u16); + assert_vector_matches_u16( + svget3_u16::<{ 0usize as i32 }>(loaded), + svindex_u16((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + assert_vector_matches_u16( + svget3_u16::<{ 1usize as i32 }>(loaded), + svindex_u16((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + assert_vector_matches_u16( + svget3_u16::<{ 2usize as i32 }>(loaded), + svindex_u16((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_u32_with_svst3_u32() { + let mut storage = [0 as u32; 320usize]; + let data = svcreate3_u32( + svindex_u32((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + svindex_u32((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + svindex_u32((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + svst3_u32(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svld3_u32(svptrue_b32(), storage.as_ptr() as *const u32); + assert_vector_matches_u32( + svget3_u32::<{ 0usize as i32 }>(loaded), + svindex_u32((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + assert_vector_matches_u32( + svget3_u32::<{ 1usize as i32 }>(loaded), + svindex_u32((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + assert_vector_matches_u32( + svget3_u32::<{ 2usize as i32 }>(loaded), + svindex_u32((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_u64_with_svst3_u64() { + let mut storage = [0 as u64; 160usize]; + let data = svcreate3_u64( + svindex_u64((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + svindex_u64((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + svindex_u64((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + svst3_u64(svptrue_b64(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svld3_u64(svptrue_b64(), storage.as_ptr() as *const u64); + assert_vector_matches_u64( + svget3_u64::<{ 0usize as i32 }>(loaded), + svindex_u64((0usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + assert_vector_matches_u64( + svget3_u64::<{ 1usize as i32 }>(loaded), + svindex_u64((1usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); + assert_vector_matches_u64( + svget3_u64::<{ 2usize as i32 }>(loaded), + svindex_u64((2usize).try_into().unwrap(), 3usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_vnum_f32_with_svst3_vnum_f32() { + let len = svcntw() as usize; + let mut storage = [0 as f32; 320usize]; + let data = svcreate3_f32( + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ), + ); + svst3_vnum_f32(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = svld3_vnum_f32(svptrue_b32(), storage.as_ptr() as *const f32, 1); + assert_vector_matches_f32( + svget3_f32::<{ 0usize as i32 }>(loaded), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ), + ); + assert_vector_matches_f32( + svget3_f32::<{ 1usize as i32 }>(loaded), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ), + ); + assert_vector_matches_f32( + svget3_f32::<{ 2usize as i32 }>(loaded), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_vnum_f64_with_svst3_vnum_f64() { + let len = svcntd() as usize; + let mut storage = [0 as f64; 160usize]; + let data = svcreate3_f64( + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ), + ); + svst3_vnum_f64(svptrue_b64(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svld3_vnum_f64(svptrue_b64(), storage.as_ptr() as *const f64, 1); + assert_vector_matches_f64( + svget3_f64::<{ 0usize as i32 }>(loaded), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ), + ); + assert_vector_matches_f64( + svget3_f64::<{ 1usize as i32 }>(loaded), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ), + ); + assert_vector_matches_f64( + svget3_f64::<{ 2usize as i32 }>(loaded), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_vnum_s8_with_svst3_vnum_s8() { + let len = svcntb() as usize; + let mut storage = [0 as i8; 1280usize]; + let data = svcreate3_s8( + svindex_s8( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + svindex_s8( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + svindex_s8( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + svst3_vnum_s8(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld3_vnum_s8(svptrue_b8(), storage.as_ptr() as *const i8, 1); + assert_vector_matches_i8( + svget3_s8::<{ 0usize as i32 }>(loaded), + svindex_s8( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i8( + svget3_s8::<{ 1usize as i32 }>(loaded), + svindex_s8( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i8( + svget3_s8::<{ 2usize as i32 }>(loaded), + svindex_s8( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_vnum_s16_with_svst3_vnum_s16() { + let len = svcnth() as usize; + let mut storage = [0 as i16; 640usize]; + let data = svcreate3_s16( + svindex_s16( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + svindex_s16( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + svindex_s16( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + svst3_vnum_s16(svptrue_b16(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld3_vnum_s16(svptrue_b16(), storage.as_ptr() as *const i16, 1); + assert_vector_matches_i16( + svget3_s16::<{ 0usize as i32 }>(loaded), + svindex_s16( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i16( + svget3_s16::<{ 1usize as i32 }>(loaded), + svindex_s16( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i16( + svget3_s16::<{ 2usize as i32 }>(loaded), + svindex_s16( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_vnum_s32_with_svst3_vnum_s32() { + let len = svcntw() as usize; + let mut storage = [0 as i32; 320usize]; + let data = svcreate3_s32( + svindex_s32( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + svindex_s32( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + svindex_s32( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + svst3_vnum_s32(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld3_vnum_s32(svptrue_b32(), storage.as_ptr() as *const i32, 1); + assert_vector_matches_i32( + svget3_s32::<{ 0usize as i32 }>(loaded), + svindex_s32( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i32( + svget3_s32::<{ 1usize as i32 }>(loaded), + svindex_s32( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i32( + svget3_s32::<{ 2usize as i32 }>(loaded), + svindex_s32( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_vnum_s64_with_svst3_vnum_s64() { + let len = svcntd() as usize; + let mut storage = [0 as i64; 160usize]; + let data = svcreate3_s64( + svindex_s64( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + svindex_s64( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + svindex_s64( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + svst3_vnum_s64(svptrue_b64(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svld3_vnum_s64(svptrue_b64(), storage.as_ptr() as *const i64, 1); + assert_vector_matches_i64( + svget3_s64::<{ 0usize as i32 }>(loaded), + svindex_s64( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i64( + svget3_s64::<{ 1usize as i32 }>(loaded), + svindex_s64( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i64( + svget3_s64::<{ 2usize as i32 }>(loaded), + svindex_s64( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_vnum_u8_with_svst3_vnum_u8() { + let len = svcntb() as usize; + let mut storage = [0 as u8; 1280usize]; + let data = svcreate3_u8( + svindex_u8( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + svindex_u8( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + svindex_u8( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + svst3_vnum_u8(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld3_vnum_u8(svptrue_b8(), storage.as_ptr() as *const u8, 1); + assert_vector_matches_u8( + svget3_u8::<{ 0usize as i32 }>(loaded), + svindex_u8( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u8( + svget3_u8::<{ 1usize as i32 }>(loaded), + svindex_u8( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u8( + svget3_u8::<{ 2usize as i32 }>(loaded), + svindex_u8( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_vnum_u16_with_svst3_vnum_u16() { + let len = svcnth() as usize; + let mut storage = [0 as u16; 640usize]; + let data = svcreate3_u16( + svindex_u16( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + svindex_u16( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + svindex_u16( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + svst3_vnum_u16(svptrue_b16(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = svld3_vnum_u16(svptrue_b16(), storage.as_ptr() as *const u16, 1); + assert_vector_matches_u16( + svget3_u16::<{ 0usize as i32 }>(loaded), + svindex_u16( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u16( + svget3_u16::<{ 1usize as i32 }>(loaded), + svindex_u16( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u16( + svget3_u16::<{ 2usize as i32 }>(loaded), + svindex_u16( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_vnum_u32_with_svst3_vnum_u32() { + let len = svcntw() as usize; + let mut storage = [0 as u32; 320usize]; + let data = svcreate3_u32( + svindex_u32( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + svindex_u32( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + svindex_u32( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + svst3_vnum_u32(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svld3_vnum_u32(svptrue_b32(), storage.as_ptr() as *const u32, 1); + assert_vector_matches_u32( + svget3_u32::<{ 0usize as i32 }>(loaded), + svindex_u32( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u32( + svget3_u32::<{ 1usize as i32 }>(loaded), + svindex_u32( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u32( + svget3_u32::<{ 2usize as i32 }>(loaded), + svindex_u32( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld3_vnum_u64_with_svst3_vnum_u64() { + let len = svcntd() as usize; + let mut storage = [0 as u64; 160usize]; + let data = svcreate3_u64( + svindex_u64( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + svindex_u64( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + svindex_u64( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + svst3_vnum_u64(svptrue_b64(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svld3_vnum_u64(svptrue_b64(), storage.as_ptr() as *const u64, 1); + assert_vector_matches_u64( + svget3_u64::<{ 0usize as i32 }>(loaded), + svindex_u64( + (len + 0usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u64( + svget3_u64::<{ 1usize as i32 }>(loaded), + svindex_u64( + (len + 1usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u64( + svget3_u64::<{ 2usize as i32 }>(loaded), + svindex_u64( + (len + 2usize).try_into().unwrap(), + 3usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_f32_with_svst4_f32() { + let mut storage = [0 as f32; 320usize]; + let data = svcreate4_f32( + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ), + ); + svst4_f32(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = svld4_f32(svptrue_b32(), storage.as_ptr() as *const f32); + assert_vector_matches_f32( + svget4_f32::<{ 0usize as i32 }>(loaded), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ), + ); + assert_vector_matches_f32( + svget4_f32::<{ 1usize as i32 }>(loaded), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ), + ); + assert_vector_matches_f32( + svget4_f32::<{ 2usize as i32 }>(loaded), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ), + ); + assert_vector_matches_f32( + svget4_f32::<{ 3usize as i32 }>(loaded), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_f64_with_svst4_f64() { + let mut storage = [0 as f64; 160usize]; + let data = svcreate4_f64( + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ), + ); + svst4_f64(svptrue_b64(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svld4_f64(svptrue_b64(), storage.as_ptr() as *const f64); + assert_vector_matches_f64( + svget4_f64::<{ 0usize as i32 }>(loaded), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ), + ); + assert_vector_matches_f64( + svget4_f64::<{ 1usize as i32 }>(loaded), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ), + ); + assert_vector_matches_f64( + svget4_f64::<{ 2usize as i32 }>(loaded), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ), + ); + assert_vector_matches_f64( + svget4_f64::<{ 3usize as i32 }>(loaded), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_s8_with_svst4_s8() { + let mut storage = [0 as i8; 1280usize]; + let data = svcreate4_s8( + svindex_s8((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_s8((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_s8((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_s8((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + svst4_s8(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld4_s8(svptrue_b8(), storage.as_ptr() as *const i8); + assert_vector_matches_i8( + svget4_s8::<{ 0usize as i32 }>(loaded), + svindex_s8((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_i8( + svget4_s8::<{ 1usize as i32 }>(loaded), + svindex_s8((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_i8( + svget4_s8::<{ 2usize as i32 }>(loaded), + svindex_s8((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_i8( + svget4_s8::<{ 3usize as i32 }>(loaded), + svindex_s8((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_s16_with_svst4_s16() { + let mut storage = [0 as i16; 640usize]; + let data = svcreate4_s16( + svindex_s16((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_s16((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_s16((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_s16((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + svst4_s16(svptrue_b16(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld4_s16(svptrue_b16(), storage.as_ptr() as *const i16); + assert_vector_matches_i16( + svget4_s16::<{ 0usize as i32 }>(loaded), + svindex_s16((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_i16( + svget4_s16::<{ 1usize as i32 }>(loaded), + svindex_s16((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_i16( + svget4_s16::<{ 2usize as i32 }>(loaded), + svindex_s16((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_i16( + svget4_s16::<{ 3usize as i32 }>(loaded), + svindex_s16((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_s32_with_svst4_s32() { + let mut storage = [0 as i32; 320usize]; + let data = svcreate4_s32( + svindex_s32((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_s32((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_s32((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_s32((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + svst4_s32(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld4_s32(svptrue_b32(), storage.as_ptr() as *const i32); + assert_vector_matches_i32( + svget4_s32::<{ 0usize as i32 }>(loaded), + svindex_s32((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_i32( + svget4_s32::<{ 1usize as i32 }>(loaded), + svindex_s32((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_i32( + svget4_s32::<{ 2usize as i32 }>(loaded), + svindex_s32((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_i32( + svget4_s32::<{ 3usize as i32 }>(loaded), + svindex_s32((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_s64_with_svst4_s64() { + let mut storage = [0 as i64; 160usize]; + let data = svcreate4_s64( + svindex_s64((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_s64((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_s64((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_s64((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + svst4_s64(svptrue_b64(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svld4_s64(svptrue_b64(), storage.as_ptr() as *const i64); + assert_vector_matches_i64( + svget4_s64::<{ 0usize as i32 }>(loaded), + svindex_s64((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_i64( + svget4_s64::<{ 1usize as i32 }>(loaded), + svindex_s64((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_i64( + svget4_s64::<{ 2usize as i32 }>(loaded), + svindex_s64((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_i64( + svget4_s64::<{ 3usize as i32 }>(loaded), + svindex_s64((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_u8_with_svst4_u8() { + let mut storage = [0 as u8; 1280usize]; + let data = svcreate4_u8( + svindex_u8((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_u8((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_u8((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_u8((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + svst4_u8(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld4_u8(svptrue_b8(), storage.as_ptr() as *const u8); + assert_vector_matches_u8( + svget4_u8::<{ 0usize as i32 }>(loaded), + svindex_u8((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_u8( + svget4_u8::<{ 1usize as i32 }>(loaded), + svindex_u8((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_u8( + svget4_u8::<{ 2usize as i32 }>(loaded), + svindex_u8((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_u8( + svget4_u8::<{ 3usize as i32 }>(loaded), + svindex_u8((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_u16_with_svst4_u16() { + let mut storage = [0 as u16; 640usize]; + let data = svcreate4_u16( + svindex_u16((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_u16((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_u16((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_u16((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + svst4_u16(svptrue_b16(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = svld4_u16(svptrue_b16(), storage.as_ptr() as *const u16); + assert_vector_matches_u16( + svget4_u16::<{ 0usize as i32 }>(loaded), + svindex_u16((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_u16( + svget4_u16::<{ 1usize as i32 }>(loaded), + svindex_u16((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_u16( + svget4_u16::<{ 2usize as i32 }>(loaded), + svindex_u16((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_u16( + svget4_u16::<{ 3usize as i32 }>(loaded), + svindex_u16((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_u32_with_svst4_u32() { + let mut storage = [0 as u32; 320usize]; + let data = svcreate4_u32( + svindex_u32((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_u32((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_u32((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_u32((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + svst4_u32(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svld4_u32(svptrue_b32(), storage.as_ptr() as *const u32); + assert_vector_matches_u32( + svget4_u32::<{ 0usize as i32 }>(loaded), + svindex_u32((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_u32( + svget4_u32::<{ 1usize as i32 }>(loaded), + svindex_u32((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_u32( + svget4_u32::<{ 2usize as i32 }>(loaded), + svindex_u32((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_u32( + svget4_u32::<{ 3usize as i32 }>(loaded), + svindex_u32((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_u64_with_svst4_u64() { + let mut storage = [0 as u64; 160usize]; + let data = svcreate4_u64( + svindex_u64((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_u64((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_u64((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + svindex_u64((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + svst4_u64(svptrue_b64(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svld4_u64(svptrue_b64(), storage.as_ptr() as *const u64); + assert_vector_matches_u64( + svget4_u64::<{ 0usize as i32 }>(loaded), + svindex_u64((0usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_u64( + svget4_u64::<{ 1usize as i32 }>(loaded), + svindex_u64((1usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_u64( + svget4_u64::<{ 2usize as i32 }>(loaded), + svindex_u64((2usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); + assert_vector_matches_u64( + svget4_u64::<{ 3usize as i32 }>(loaded), + svindex_u64((3usize).try_into().unwrap(), 4usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_vnum_f32_with_svst4_vnum_f32() { + let len = svcntw() as usize; + let mut storage = [0 as f32; 320usize]; + let data = svcreate4_f32( + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ), + ); + svst4_vnum_f32(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = svld4_vnum_f32(svptrue_b32(), storage.as_ptr() as *const f32, 1); + assert_vector_matches_f32( + svget4_f32::<{ 0usize as i32 }>(loaded), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ), + ); + assert_vector_matches_f32( + svget4_f32::<{ 1usize as i32 }>(loaded), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ), + ); + assert_vector_matches_f32( + svget4_f32::<{ 2usize as i32 }>(loaded), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ), + ); + assert_vector_matches_f32( + svget4_f32::<{ 3usize as i32 }>(loaded), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_vnum_f64_with_svst4_vnum_f64() { + let len = svcntd() as usize; + let mut storage = [0 as f64; 160usize]; + let data = svcreate4_f64( + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ), + ); + svst4_vnum_f64(svptrue_b64(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svld4_vnum_f64(svptrue_b64(), storage.as_ptr() as *const f64, 1); + assert_vector_matches_f64( + svget4_f64::<{ 0usize as i32 }>(loaded), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ), + ); + assert_vector_matches_f64( + svget4_f64::<{ 1usize as i32 }>(loaded), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ), + ); + assert_vector_matches_f64( + svget4_f64::<{ 2usize as i32 }>(loaded), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ), + ); + assert_vector_matches_f64( + svget4_f64::<{ 3usize as i32 }>(loaded), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_vnum_s8_with_svst4_vnum_s8() { + let len = svcntb() as usize; + let mut storage = [0 as i8; 1280usize]; + let data = svcreate4_s8( + svindex_s8( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_s8( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_s8( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_s8( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + svst4_vnum_s8(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svld4_vnum_s8(svptrue_b8(), storage.as_ptr() as *const i8, 1); + assert_vector_matches_i8( + svget4_s8::<{ 0usize as i32 }>(loaded), + svindex_s8( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i8( + svget4_s8::<{ 1usize as i32 }>(loaded), + svindex_s8( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i8( + svget4_s8::<{ 2usize as i32 }>(loaded), + svindex_s8( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i8( + svget4_s8::<{ 3usize as i32 }>(loaded), + svindex_s8( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_vnum_s16_with_svst4_vnum_s16() { + let len = svcnth() as usize; + let mut storage = [0 as i16; 640usize]; + let data = svcreate4_s16( + svindex_s16( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_s16( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_s16( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_s16( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + svst4_vnum_s16(svptrue_b16(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svld4_vnum_s16(svptrue_b16(), storage.as_ptr() as *const i16, 1); + assert_vector_matches_i16( + svget4_s16::<{ 0usize as i32 }>(loaded), + svindex_s16( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i16( + svget4_s16::<{ 1usize as i32 }>(loaded), + svindex_s16( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i16( + svget4_s16::<{ 2usize as i32 }>(loaded), + svindex_s16( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i16( + svget4_s16::<{ 3usize as i32 }>(loaded), + svindex_s16( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_vnum_s32_with_svst4_vnum_s32() { + let len = svcntw() as usize; + let mut storage = [0 as i32; 320usize]; + let data = svcreate4_s32( + svindex_s32( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_s32( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_s32( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_s32( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + svst4_vnum_s32(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svld4_vnum_s32(svptrue_b32(), storage.as_ptr() as *const i32, 1); + assert_vector_matches_i32( + svget4_s32::<{ 0usize as i32 }>(loaded), + svindex_s32( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i32( + svget4_s32::<{ 1usize as i32 }>(loaded), + svindex_s32( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i32( + svget4_s32::<{ 2usize as i32 }>(loaded), + svindex_s32( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i32( + svget4_s32::<{ 3usize as i32 }>(loaded), + svindex_s32( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_vnum_s64_with_svst4_vnum_s64() { + let len = svcntd() as usize; + let mut storage = [0 as i64; 160usize]; + let data = svcreate4_s64( + svindex_s64( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_s64( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_s64( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_s64( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + svst4_vnum_s64(svptrue_b64(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svld4_vnum_s64(svptrue_b64(), storage.as_ptr() as *const i64, 1); + assert_vector_matches_i64( + svget4_s64::<{ 0usize as i32 }>(loaded), + svindex_s64( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i64( + svget4_s64::<{ 1usize as i32 }>(loaded), + svindex_s64( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i64( + svget4_s64::<{ 2usize as i32 }>(loaded), + svindex_s64( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_i64( + svget4_s64::<{ 3usize as i32 }>(loaded), + svindex_s64( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_vnum_u8_with_svst4_vnum_u8() { + let len = svcntb() as usize; + let mut storage = [0 as u8; 1280usize]; + let data = svcreate4_u8( + svindex_u8( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_u8( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_u8( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_u8( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + svst4_vnum_u8(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svld4_vnum_u8(svptrue_b8(), storage.as_ptr() as *const u8, 1); + assert_vector_matches_u8( + svget4_u8::<{ 0usize as i32 }>(loaded), + svindex_u8( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u8( + svget4_u8::<{ 1usize as i32 }>(loaded), + svindex_u8( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u8( + svget4_u8::<{ 2usize as i32 }>(loaded), + svindex_u8( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u8( + svget4_u8::<{ 3usize as i32 }>(loaded), + svindex_u8( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_vnum_u16_with_svst4_vnum_u16() { + let len = svcnth() as usize; + let mut storage = [0 as u16; 640usize]; + let data = svcreate4_u16( + svindex_u16( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_u16( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_u16( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_u16( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + svst4_vnum_u16(svptrue_b16(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = svld4_vnum_u16(svptrue_b16(), storage.as_ptr() as *const u16, 1); + assert_vector_matches_u16( + svget4_u16::<{ 0usize as i32 }>(loaded), + svindex_u16( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u16( + svget4_u16::<{ 1usize as i32 }>(loaded), + svindex_u16( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u16( + svget4_u16::<{ 2usize as i32 }>(loaded), + svindex_u16( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u16( + svget4_u16::<{ 3usize as i32 }>(loaded), + svindex_u16( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_vnum_u32_with_svst4_vnum_u32() { + let len = svcntw() as usize; + let mut storage = [0 as u32; 320usize]; + let data = svcreate4_u32( + svindex_u32( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_u32( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_u32( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_u32( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + svst4_vnum_u32(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svld4_vnum_u32(svptrue_b32(), storage.as_ptr() as *const u32, 1); + assert_vector_matches_u32( + svget4_u32::<{ 0usize as i32 }>(loaded), + svindex_u32( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u32( + svget4_u32::<{ 1usize as i32 }>(loaded), + svindex_u32( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u32( + svget4_u32::<{ 2usize as i32 }>(loaded), + svindex_u32( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u32( + svget4_u32::<{ 3usize as i32 }>(loaded), + svindex_u32( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svld4_vnum_u64_with_svst4_vnum_u64() { + let len = svcntd() as usize; + let mut storage = [0 as u64; 160usize]; + let data = svcreate4_u64( + svindex_u64( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_u64( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_u64( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + svindex_u64( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + svst4_vnum_u64(svptrue_b64(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svld4_vnum_u64(svptrue_b64(), storage.as_ptr() as *const u64, 1); + assert_vector_matches_u64( + svget4_u64::<{ 0usize as i32 }>(loaded), + svindex_u64( + (len + 0usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u64( + svget4_u64::<{ 1usize as i32 }>(loaded), + svindex_u64( + (len + 1usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u64( + svget4_u64::<{ 2usize as i32 }>(loaded), + svindex_u64( + (len + 2usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); + assert_vector_matches_u64( + svget4_u64::<{ 3usize as i32 }>(loaded), + svindex_u64( + (len + 3usize).try_into().unwrap(), + 4usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_f32() { + svsetffr(); + let _ = svld1_f32(svptrue_b32(), F32_DATA.as_ptr()); + let loaded = svldff1_f32(svptrue_b32(), F32_DATA.as_ptr()); + assert_vector_matches_f32( + loaded, + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_f64() { + svsetffr(); + let _ = svld1_f64(svptrue_b64(), F64_DATA.as_ptr()); + let loaded = svldff1_f64(svptrue_b64(), F64_DATA.as_ptr()); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_s8() { + svsetffr(); + let _ = svld1_s8(svptrue_b8(), I8_DATA.as_ptr()); + let loaded = svldff1_s8(svptrue_b8(), I8_DATA.as_ptr()); + assert_vector_matches_i8( + loaded, + svindex_s8((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_s16() { + svsetffr(); + let _ = svld1_s16(svptrue_b16(), I16_DATA.as_ptr()); + let loaded = svldff1_s16(svptrue_b16(), I16_DATA.as_ptr()); + assert_vector_matches_i16( + loaded, + svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_s32() { + svsetffr(); + let _ = svld1_s32(svptrue_b32(), I32_DATA.as_ptr()); + let loaded = svldff1_s32(svptrue_b32(), I32_DATA.as_ptr()); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_s64() { + svsetffr(); + let _ = svld1_s64(svptrue_b64(), I64_DATA.as_ptr()); + let loaded = svldff1_s64(svptrue_b64(), I64_DATA.as_ptr()); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_u8() { + svsetffr(); + let _ = svld1_u8(svptrue_b8(), U8_DATA.as_ptr()); + let loaded = svldff1_u8(svptrue_b8(), U8_DATA.as_ptr()); + assert_vector_matches_u8( + loaded, + svindex_u8((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_u16() { + svsetffr(); + let _ = svld1_u16(svptrue_b16(), U16_DATA.as_ptr()); + let loaded = svldff1_u16(svptrue_b16(), U16_DATA.as_ptr()); + assert_vector_matches_u16( + loaded, + svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_u32() { + svsetffr(); + let _ = svld1_u32(svptrue_b32(), U32_DATA.as_ptr()); + let loaded = svldff1_u32(svptrue_b32(), U32_DATA.as_ptr()); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_u64() { + svsetffr(); + let _ = svld1_u64(svptrue_b64(), U64_DATA.as_ptr()); + let loaded = svldff1_u64(svptrue_b64(), U64_DATA.as_ptr()); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_vnum_f32() { + svsetffr(); + let _ = svld1_vnum_f32(svptrue_b32(), F32_DATA.as_ptr(), 1); + let loaded = svldff1_vnum_f32(svptrue_b32(), F32_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_f32( + loaded, + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_vnum_f64() { + svsetffr(); + let _ = svld1_vnum_f64(svptrue_b64(), F64_DATA.as_ptr(), 1); + let loaded = svldff1_vnum_f64(svptrue_b64(), F64_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_vnum_s8() { + svsetffr(); + let _ = svld1_vnum_s8(svptrue_b8(), I8_DATA.as_ptr(), 1); + let loaded = svldff1_vnum_s8(svptrue_b8(), I8_DATA.as_ptr(), 1); + let len = svcntb() as usize; + assert_vector_matches_i8( + loaded, + svindex_s8( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_vnum_s16() { + svsetffr(); + let _ = svld1_vnum_s16(svptrue_b16(), I16_DATA.as_ptr(), 1); + let loaded = svldff1_vnum_s16(svptrue_b16(), I16_DATA.as_ptr(), 1); + let len = svcnth() as usize; + assert_vector_matches_i16( + loaded, + svindex_s16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_vnum_s32() { + svsetffr(); + let _ = svld1_vnum_s32(svptrue_b32(), I32_DATA.as_ptr(), 1); + let loaded = svldff1_vnum_s32(svptrue_b32(), I32_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_i32( + loaded, + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_vnum_s64() { + svsetffr(); + let _ = svld1_vnum_s64(svptrue_b64(), I64_DATA.as_ptr(), 1); + let loaded = svldff1_vnum_s64(svptrue_b64(), I64_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_vnum_u8() { + svsetffr(); + let _ = svld1_vnum_u8(svptrue_b8(), U8_DATA.as_ptr(), 1); + let loaded = svldff1_vnum_u8(svptrue_b8(), U8_DATA.as_ptr(), 1); + let len = svcntb() as usize; + assert_vector_matches_u8( + loaded, + svindex_u8( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_vnum_u16() { + svsetffr(); + let _ = svld1_vnum_u16(svptrue_b16(), U16_DATA.as_ptr(), 1); + let loaded = svldff1_vnum_u16(svptrue_b16(), U16_DATA.as_ptr(), 1); + let len = svcnth() as usize; + assert_vector_matches_u16( + loaded, + svindex_u16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_vnum_u32() { + svsetffr(); + let _ = svld1_vnum_u32(svptrue_b32(), U32_DATA.as_ptr(), 1); + let loaded = svldff1_vnum_u32(svptrue_b32(), U32_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_u32( + loaded, + svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_vnum_u64() { + svsetffr(); + let _ = svld1_vnum_u64(svptrue_b64(), U64_DATA.as_ptr(), 1); + let loaded = svldff1_vnum_u64(svptrue_b64(), U64_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_s16() { + svsetffr(); + let _ = svld1sb_s16(svptrue_b8(), I8_DATA.as_ptr()); + let loaded = svldff1sb_s16(svptrue_b8(), I8_DATA.as_ptr()); + assert_vector_matches_i16( + loaded, + svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_s32() { + svsetffr(); + let _ = svld1sb_s32(svptrue_b8(), I8_DATA.as_ptr()); + let loaded = svldff1sb_s32(svptrue_b8(), I8_DATA.as_ptr()); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_s32() { + svsetffr(); + let _ = svld1sh_s32(svptrue_b16(), I16_DATA.as_ptr()); + let loaded = svldff1sh_s32(svptrue_b16(), I16_DATA.as_ptr()); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_s64() { + svsetffr(); + let _ = svld1sb_s64(svptrue_b8(), I8_DATA.as_ptr()); + let loaded = svldff1sb_s64(svptrue_b8(), I8_DATA.as_ptr()); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_s64() { + svsetffr(); + let _ = svld1sh_s64(svptrue_b16(), I16_DATA.as_ptr()); + let loaded = svldff1sh_s64(svptrue_b16(), I16_DATA.as_ptr()); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sw_s64() { + svsetffr(); + let _ = svld1sw_s64(svptrue_b32(), I32_DATA.as_ptr()); + let loaded = svldff1sw_s64(svptrue_b32(), I32_DATA.as_ptr()); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_u16() { + svsetffr(); + let _ = svld1sb_u16(svptrue_b8(), I8_DATA.as_ptr()); + let loaded = svldff1sb_u16(svptrue_b8(), I8_DATA.as_ptr()); + assert_vector_matches_u16( + loaded, + svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_u32() { + svsetffr(); + let _ = svld1sb_u32(svptrue_b8(), I8_DATA.as_ptr()); + let loaded = svldff1sb_u32(svptrue_b8(), I8_DATA.as_ptr()); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_u32() { + svsetffr(); + let _ = svld1sh_u32(svptrue_b16(), I16_DATA.as_ptr()); + let loaded = svldff1sh_u32(svptrue_b16(), I16_DATA.as_ptr()); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_u64() { + svsetffr(); + let _ = svld1sb_u64(svptrue_b8(), I8_DATA.as_ptr()); + let loaded = svldff1sb_u64(svptrue_b8(), I8_DATA.as_ptr()); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_u64() { + svsetffr(); + let _ = svld1sh_u64(svptrue_b16(), I16_DATA.as_ptr()); + let loaded = svldff1sh_u64(svptrue_b16(), I16_DATA.as_ptr()); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sw_u64() { + svsetffr(); + let _ = svld1sw_u64(svptrue_b32(), I32_DATA.as_ptr()); + let loaded = svldff1sw_u64(svptrue_b32(), I32_DATA.as_ptr()); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_vnum_s16() { + svsetffr(); + let _ = svld1sb_vnum_s16(svptrue_b8(), I8_DATA.as_ptr(), 1); + let loaded = svldff1sb_vnum_s16(svptrue_b8(), I8_DATA.as_ptr(), 1); + let len = svcnth() as usize; + assert_vector_matches_i16( + loaded, + svindex_s16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_vnum_s32() { + svsetffr(); + let _ = svld1sb_vnum_s32(svptrue_b8(), I8_DATA.as_ptr(), 1); + let loaded = svldff1sb_vnum_s32(svptrue_b8(), I8_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_i32( + loaded, + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_vnum_s32() { + svsetffr(); + let _ = svld1sh_vnum_s32(svptrue_b16(), I16_DATA.as_ptr(), 1); + let loaded = svldff1sh_vnum_s32(svptrue_b16(), I16_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_i32( + loaded, + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_vnum_s64() { + svsetffr(); + let _ = svld1sb_vnum_s64(svptrue_b8(), I8_DATA.as_ptr(), 1); + let loaded = svldff1sb_vnum_s64(svptrue_b8(), I8_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_vnum_s64() { + svsetffr(); + let _ = svld1sh_vnum_s64(svptrue_b16(), I16_DATA.as_ptr(), 1); + let loaded = svldff1sh_vnum_s64(svptrue_b16(), I16_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sw_vnum_s64() { + svsetffr(); + let _ = svld1sw_vnum_s64(svptrue_b32(), I32_DATA.as_ptr(), 1); + let loaded = svldff1sw_vnum_s64(svptrue_b32(), I32_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_vnum_u16() { + svsetffr(); + let _ = svld1sb_vnum_u16(svptrue_b8(), I8_DATA.as_ptr(), 1); + let loaded = svldff1sb_vnum_u16(svptrue_b8(), I8_DATA.as_ptr(), 1); + let len = svcnth() as usize; + assert_vector_matches_u16( + loaded, + svindex_u16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_vnum_u32() { + svsetffr(); + let _ = svld1sb_vnum_u32(svptrue_b8(), I8_DATA.as_ptr(), 1); + let loaded = svldff1sb_vnum_u32(svptrue_b8(), I8_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_u32( + loaded, + svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_vnum_u32() { + svsetffr(); + let _ = svld1sh_vnum_u32(svptrue_b16(), I16_DATA.as_ptr(), 1); + let loaded = svldff1sh_vnum_u32(svptrue_b16(), I16_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_u32( + loaded, + svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_vnum_u64() { + svsetffr(); + let _ = svld1sb_vnum_u64(svptrue_b8(), I8_DATA.as_ptr(), 1); + let loaded = svldff1sb_vnum_u64(svptrue_b8(), I8_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_vnum_u64() { + svsetffr(); + let _ = svld1sh_vnum_u64(svptrue_b16(), I16_DATA.as_ptr(), 1); + let loaded = svldff1sh_vnum_u64(svptrue_b16(), I16_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sw_vnum_u64() { + svsetffr(); + let _ = svld1sw_vnum_u64(svptrue_b32(), I32_DATA.as_ptr(), 1); + let loaded = svldff1sw_vnum_u64(svptrue_b32(), I32_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1ub_s16() { + svsetffr(); + let _ = svld1ub_s16(svptrue_b8(), U8_DATA.as_ptr()); + let loaded = svldff1ub_s16(svptrue_b8(), U8_DATA.as_ptr()); + assert_vector_matches_i16( + loaded, + svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1ub_s32() { + svsetffr(); + let _ = svld1ub_s32(svptrue_b8(), U8_DATA.as_ptr()); + let loaded = svldff1ub_s32(svptrue_b8(), U8_DATA.as_ptr()); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uh_s32() { + svsetffr(); + let _ = svld1uh_s32(svptrue_b16(), U16_DATA.as_ptr()); + let loaded = svldff1uh_s32(svptrue_b16(), U16_DATA.as_ptr()); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1ub_s64() { + svsetffr(); + let _ = svld1ub_s64(svptrue_b8(), U8_DATA.as_ptr()); + let loaded = svldff1ub_s64(svptrue_b8(), U8_DATA.as_ptr()); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uh_s64() { + svsetffr(); + let _ = svld1uh_s64(svptrue_b16(), U16_DATA.as_ptr()); + let loaded = svldff1uh_s64(svptrue_b16(), U16_DATA.as_ptr()); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uw_s64() { + svsetffr(); + let _ = svld1uw_s64(svptrue_b32(), U32_DATA.as_ptr()); + let loaded = svldff1uw_s64(svptrue_b32(), U32_DATA.as_ptr()); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1ub_u16() { + svsetffr(); + let _ = svld1ub_u16(svptrue_b8(), U8_DATA.as_ptr()); + let loaded = svldff1ub_u16(svptrue_b8(), U8_DATA.as_ptr()); + assert_vector_matches_u16( + loaded, + svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1ub_u32() { + svsetffr(); + let _ = svld1ub_u32(svptrue_b8(), U8_DATA.as_ptr()); + let loaded = svldff1ub_u32(svptrue_b8(), U8_DATA.as_ptr()); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uh_u32() { + svsetffr(); + let _ = svld1uh_u32(svptrue_b16(), U16_DATA.as_ptr()); + let loaded = svldff1uh_u32(svptrue_b16(), U16_DATA.as_ptr()); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1ub_u64() { + svsetffr(); + let _ = svld1ub_u64(svptrue_b8(), U8_DATA.as_ptr()); + let loaded = svldff1ub_u64(svptrue_b8(), U8_DATA.as_ptr()); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uh_u64() { + svsetffr(); + let _ = svld1uh_u64(svptrue_b16(), U16_DATA.as_ptr()); + let loaded = svldff1uh_u64(svptrue_b16(), U16_DATA.as_ptr()); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uw_u64() { + svsetffr(); + let _ = svld1uw_u64(svptrue_b32(), U32_DATA.as_ptr()); + let loaded = svldff1uw_u64(svptrue_b32(), U32_DATA.as_ptr()); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1ub_vnum_s16() { + svsetffr(); + let _ = svld1ub_vnum_s16(svptrue_b8(), U8_DATA.as_ptr(), 1); + let loaded = svldff1ub_vnum_s16(svptrue_b8(), U8_DATA.as_ptr(), 1); + let len = svcnth() as usize; + assert_vector_matches_i16( + loaded, + svindex_s16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1ub_vnum_s32() { + svsetffr(); + let _ = svld1ub_vnum_s32(svptrue_b8(), U8_DATA.as_ptr(), 1); + let loaded = svldff1ub_vnum_s32(svptrue_b8(), U8_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_i32( + loaded, + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uh_vnum_s32() { + svsetffr(); + let _ = svld1uh_vnum_s32(svptrue_b16(), U16_DATA.as_ptr(), 1); + let loaded = svldff1uh_vnum_s32(svptrue_b16(), U16_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_i32( + loaded, + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1ub_vnum_s64() { + svsetffr(); + let _ = svld1ub_vnum_s64(svptrue_b8(), U8_DATA.as_ptr(), 1); + let loaded = svldff1ub_vnum_s64(svptrue_b8(), U8_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uh_vnum_s64() { + svsetffr(); + let _ = svld1uh_vnum_s64(svptrue_b16(), U16_DATA.as_ptr(), 1); + let loaded = svldff1uh_vnum_s64(svptrue_b16(), U16_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uw_vnum_s64() { + svsetffr(); + let _ = svld1uw_vnum_s64(svptrue_b32(), U32_DATA.as_ptr(), 1); + let loaded = svldff1uw_vnum_s64(svptrue_b32(), U32_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1ub_vnum_u16() { + svsetffr(); + let _ = svld1ub_vnum_u16(svptrue_b8(), U8_DATA.as_ptr(), 1); + let loaded = svldff1ub_vnum_u16(svptrue_b8(), U8_DATA.as_ptr(), 1); + let len = svcnth() as usize; + assert_vector_matches_u16( + loaded, + svindex_u16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1ub_vnum_u32() { + svsetffr(); + let _ = svld1ub_vnum_u32(svptrue_b8(), U8_DATA.as_ptr(), 1); + let loaded = svldff1ub_vnum_u32(svptrue_b8(), U8_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_u32( + loaded, + svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uh_vnum_u32() { + svsetffr(); + let _ = svld1uh_vnum_u32(svptrue_b16(), U16_DATA.as_ptr(), 1); + let loaded = svldff1uh_vnum_u32(svptrue_b16(), U16_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_u32( + loaded, + svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1ub_vnum_u64() { + svsetffr(); + let _ = svld1ub_vnum_u64(svptrue_b8(), U8_DATA.as_ptr(), 1); + let loaded = svldff1ub_vnum_u64(svptrue_b8(), U8_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uh_vnum_u64() { + svsetffr(); + let _ = svld1uh_vnum_u64(svptrue_b16(), U16_DATA.as_ptr(), 1); + let loaded = svldff1uh_vnum_u64(svptrue_b16(), U16_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uw_vnum_u64() { + svsetffr(); + let _ = svld1uw_vnum_u64(svptrue_b32(), U32_DATA.as_ptr(), 1); + let loaded = svldff1uw_vnum_u64(svptrue_b32(), U32_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_f32() { + svsetffr(); + let _ = svld1_f32(svptrue_b32(), F32_DATA.as_ptr()); + let loaded = svldnf1_f32(svptrue_b32(), F32_DATA.as_ptr()); + assert_vector_matches_f32( + loaded, + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_f64() { + svsetffr(); + let _ = svld1_f64(svptrue_b64(), F64_DATA.as_ptr()); + let loaded = svldnf1_f64(svptrue_b64(), F64_DATA.as_ptr()); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_s8() { + svsetffr(); + let _ = svld1_s8(svptrue_b8(), I8_DATA.as_ptr()); + let loaded = svldnf1_s8(svptrue_b8(), I8_DATA.as_ptr()); + assert_vector_matches_i8( + loaded, + svindex_s8((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_s16() { + svsetffr(); + let _ = svld1_s16(svptrue_b16(), I16_DATA.as_ptr()); + let loaded = svldnf1_s16(svptrue_b16(), I16_DATA.as_ptr()); + assert_vector_matches_i16( + loaded, + svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_s32() { + svsetffr(); + let _ = svld1_s32(svptrue_b32(), I32_DATA.as_ptr()); + let loaded = svldnf1_s32(svptrue_b32(), I32_DATA.as_ptr()); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_s64() { + svsetffr(); + let _ = svld1_s64(svptrue_b64(), I64_DATA.as_ptr()); + let loaded = svldnf1_s64(svptrue_b64(), I64_DATA.as_ptr()); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_u8() { + svsetffr(); + let _ = svld1_u8(svptrue_b8(), U8_DATA.as_ptr()); + let loaded = svldnf1_u8(svptrue_b8(), U8_DATA.as_ptr()); + assert_vector_matches_u8( + loaded, + svindex_u8((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_u16() { + svsetffr(); + let _ = svld1_u16(svptrue_b16(), U16_DATA.as_ptr()); + let loaded = svldnf1_u16(svptrue_b16(), U16_DATA.as_ptr()); + assert_vector_matches_u16( + loaded, + svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_u32() { + svsetffr(); + let _ = svld1_u32(svptrue_b32(), U32_DATA.as_ptr()); + let loaded = svldnf1_u32(svptrue_b32(), U32_DATA.as_ptr()); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_u64() { + svsetffr(); + let _ = svld1_u64(svptrue_b64(), U64_DATA.as_ptr()); + let loaded = svldnf1_u64(svptrue_b64(), U64_DATA.as_ptr()); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_vnum_f32() { + svsetffr(); + let _ = svld1_vnum_f32(svptrue_b32(), F32_DATA.as_ptr(), 1); + let loaded = svldnf1_vnum_f32(svptrue_b32(), F32_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_f32( + loaded, + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_vnum_f64() { + svsetffr(); + let _ = svld1_vnum_f64(svptrue_b64(), F64_DATA.as_ptr(), 1); + let loaded = svldnf1_vnum_f64(svptrue_b64(), F64_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_vnum_s8() { + svsetffr(); + let _ = svld1_vnum_s8(svptrue_b8(), I8_DATA.as_ptr(), 1); + let loaded = svldnf1_vnum_s8(svptrue_b8(), I8_DATA.as_ptr(), 1); + let len = svcntb() as usize; + assert_vector_matches_i8( + loaded, + svindex_s8( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_vnum_s16() { + svsetffr(); + let _ = svld1_vnum_s16(svptrue_b16(), I16_DATA.as_ptr(), 1); + let loaded = svldnf1_vnum_s16(svptrue_b16(), I16_DATA.as_ptr(), 1); + let len = svcnth() as usize; + assert_vector_matches_i16( + loaded, + svindex_s16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_vnum_s32() { + svsetffr(); + let _ = svld1_vnum_s32(svptrue_b32(), I32_DATA.as_ptr(), 1); + let loaded = svldnf1_vnum_s32(svptrue_b32(), I32_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_i32( + loaded, + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_vnum_s64() { + svsetffr(); + let _ = svld1_vnum_s64(svptrue_b64(), I64_DATA.as_ptr(), 1); + let loaded = svldnf1_vnum_s64(svptrue_b64(), I64_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_vnum_u8() { + svsetffr(); + let _ = svld1_vnum_u8(svptrue_b8(), U8_DATA.as_ptr(), 1); + let loaded = svldnf1_vnum_u8(svptrue_b8(), U8_DATA.as_ptr(), 1); + let len = svcntb() as usize; + assert_vector_matches_u8( + loaded, + svindex_u8( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_vnum_u16() { + svsetffr(); + let _ = svld1_vnum_u16(svptrue_b16(), U16_DATA.as_ptr(), 1); + let loaded = svldnf1_vnum_u16(svptrue_b16(), U16_DATA.as_ptr(), 1); + let len = svcnth() as usize; + assert_vector_matches_u16( + loaded, + svindex_u16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_vnum_u32() { + svsetffr(); + let _ = svld1_vnum_u32(svptrue_b32(), U32_DATA.as_ptr(), 1); + let loaded = svldnf1_vnum_u32(svptrue_b32(), U32_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_u32( + loaded, + svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1_vnum_u64() { + svsetffr(); + let _ = svld1_vnum_u64(svptrue_b64(), U64_DATA.as_ptr(), 1); + let loaded = svldnf1_vnum_u64(svptrue_b64(), U64_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sb_s16() { + svsetffr(); + let _ = svld1sb_s16(svptrue_b8(), I8_DATA.as_ptr()); + let loaded = svldnf1sb_s16(svptrue_b8(), I8_DATA.as_ptr()); + assert_vector_matches_i16( + loaded, + svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sb_s32() { + svsetffr(); + let _ = svld1sb_s32(svptrue_b8(), I8_DATA.as_ptr()); + let loaded = svldnf1sb_s32(svptrue_b8(), I8_DATA.as_ptr()); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sh_s32() { + svsetffr(); + let _ = svld1sh_s32(svptrue_b16(), I16_DATA.as_ptr()); + let loaded = svldnf1sh_s32(svptrue_b16(), I16_DATA.as_ptr()); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sb_s64() { + svsetffr(); + let _ = svld1sb_s64(svptrue_b8(), I8_DATA.as_ptr()); + let loaded = svldnf1sb_s64(svptrue_b8(), I8_DATA.as_ptr()); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sh_s64() { + svsetffr(); + let _ = svld1sh_s64(svptrue_b16(), I16_DATA.as_ptr()); + let loaded = svldnf1sh_s64(svptrue_b16(), I16_DATA.as_ptr()); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sw_s64() { + svsetffr(); + let _ = svld1sw_s64(svptrue_b32(), I32_DATA.as_ptr()); + let loaded = svldnf1sw_s64(svptrue_b32(), I32_DATA.as_ptr()); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sb_u16() { + svsetffr(); + let _ = svld1sb_u16(svptrue_b8(), I8_DATA.as_ptr()); + let loaded = svldnf1sb_u16(svptrue_b8(), I8_DATA.as_ptr()); + assert_vector_matches_u16( + loaded, + svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sb_u32() { + svsetffr(); + let _ = svld1sb_u32(svptrue_b8(), I8_DATA.as_ptr()); + let loaded = svldnf1sb_u32(svptrue_b8(), I8_DATA.as_ptr()); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sh_u32() { + svsetffr(); + let _ = svld1sh_u32(svptrue_b16(), I16_DATA.as_ptr()); + let loaded = svldnf1sh_u32(svptrue_b16(), I16_DATA.as_ptr()); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sb_u64() { + svsetffr(); + let _ = svld1sb_u64(svptrue_b8(), I8_DATA.as_ptr()); + let loaded = svldnf1sb_u64(svptrue_b8(), I8_DATA.as_ptr()); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sh_u64() { + svsetffr(); + let _ = svld1sh_u64(svptrue_b16(), I16_DATA.as_ptr()); + let loaded = svldnf1sh_u64(svptrue_b16(), I16_DATA.as_ptr()); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sw_u64() { + svsetffr(); + let _ = svld1sw_u64(svptrue_b32(), I32_DATA.as_ptr()); + let loaded = svldnf1sw_u64(svptrue_b32(), I32_DATA.as_ptr()); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sb_vnum_s16() { + svsetffr(); + let _ = svld1sb_vnum_s16(svptrue_b8(), I8_DATA.as_ptr(), 1); + let loaded = svldnf1sb_vnum_s16(svptrue_b8(), I8_DATA.as_ptr(), 1); + let len = svcnth() as usize; + assert_vector_matches_i16( + loaded, + svindex_s16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sb_vnum_s32() { + svsetffr(); + let _ = svld1sb_vnum_s32(svptrue_b8(), I8_DATA.as_ptr(), 1); + let loaded = svldnf1sb_vnum_s32(svptrue_b8(), I8_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_i32( + loaded, + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sh_vnum_s32() { + svsetffr(); + let _ = svld1sh_vnum_s32(svptrue_b16(), I16_DATA.as_ptr(), 1); + let loaded = svldnf1sh_vnum_s32(svptrue_b16(), I16_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_i32( + loaded, + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sb_vnum_s64() { + svsetffr(); + let _ = svld1sb_vnum_s64(svptrue_b8(), I8_DATA.as_ptr(), 1); + let loaded = svldnf1sb_vnum_s64(svptrue_b8(), I8_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sh_vnum_s64() { + svsetffr(); + let _ = svld1sh_vnum_s64(svptrue_b16(), I16_DATA.as_ptr(), 1); + let loaded = svldnf1sh_vnum_s64(svptrue_b16(), I16_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sw_vnum_s64() { + svsetffr(); + let _ = svld1sw_vnum_s64(svptrue_b32(), I32_DATA.as_ptr(), 1); + let loaded = svldnf1sw_vnum_s64(svptrue_b32(), I32_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sb_vnum_u16() { + svsetffr(); + let _ = svld1sb_vnum_u16(svptrue_b8(), I8_DATA.as_ptr(), 1); + let loaded = svldnf1sb_vnum_u16(svptrue_b8(), I8_DATA.as_ptr(), 1); + let len = svcnth() as usize; + assert_vector_matches_u16( + loaded, + svindex_u16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sb_vnum_u32() { + svsetffr(); + let _ = svld1sb_vnum_u32(svptrue_b8(), I8_DATA.as_ptr(), 1); + let loaded = svldnf1sb_vnum_u32(svptrue_b8(), I8_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_u32( + loaded, + svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sh_vnum_u32() { + svsetffr(); + let _ = svld1sh_vnum_u32(svptrue_b16(), I16_DATA.as_ptr(), 1); + let loaded = svldnf1sh_vnum_u32(svptrue_b16(), I16_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_u32( + loaded, + svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sb_vnum_u64() { + svsetffr(); + let _ = svld1sb_vnum_u64(svptrue_b8(), I8_DATA.as_ptr(), 1); + let loaded = svldnf1sb_vnum_u64(svptrue_b8(), I8_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sh_vnum_u64() { + svsetffr(); + let _ = svld1sh_vnum_u64(svptrue_b16(), I16_DATA.as_ptr(), 1); + let loaded = svldnf1sh_vnum_u64(svptrue_b16(), I16_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1sw_vnum_u64() { + svsetffr(); + let _ = svld1sw_vnum_u64(svptrue_b32(), I32_DATA.as_ptr(), 1); + let loaded = svldnf1sw_vnum_u64(svptrue_b32(), I32_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1ub_s16() { + svsetffr(); + let _ = svld1ub_s16(svptrue_b8(), U8_DATA.as_ptr()); + let loaded = svldnf1ub_s16(svptrue_b8(), U8_DATA.as_ptr()); + assert_vector_matches_i16( + loaded, + svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1ub_s32() { + svsetffr(); + let _ = svld1ub_s32(svptrue_b8(), U8_DATA.as_ptr()); + let loaded = svldnf1ub_s32(svptrue_b8(), U8_DATA.as_ptr()); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1uh_s32() { + svsetffr(); + let _ = svld1uh_s32(svptrue_b16(), U16_DATA.as_ptr()); + let loaded = svldnf1uh_s32(svptrue_b16(), U16_DATA.as_ptr()); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1ub_s64() { + svsetffr(); + let _ = svld1ub_s64(svptrue_b8(), U8_DATA.as_ptr()); + let loaded = svldnf1ub_s64(svptrue_b8(), U8_DATA.as_ptr()); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1uh_s64() { + svsetffr(); + let _ = svld1uh_s64(svptrue_b16(), U16_DATA.as_ptr()); + let loaded = svldnf1uh_s64(svptrue_b16(), U16_DATA.as_ptr()); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1uw_s64() { + svsetffr(); + let _ = svld1uw_s64(svptrue_b32(), U32_DATA.as_ptr()); + let loaded = svldnf1uw_s64(svptrue_b32(), U32_DATA.as_ptr()); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1ub_u16() { + svsetffr(); + let _ = svld1ub_u16(svptrue_b8(), U8_DATA.as_ptr()); + let loaded = svldnf1ub_u16(svptrue_b8(), U8_DATA.as_ptr()); + assert_vector_matches_u16( + loaded, + svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1ub_u32() { + svsetffr(); + let _ = svld1ub_u32(svptrue_b8(), U8_DATA.as_ptr()); + let loaded = svldnf1ub_u32(svptrue_b8(), U8_DATA.as_ptr()); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1uh_u32() { + svsetffr(); + let _ = svld1uh_u32(svptrue_b16(), U16_DATA.as_ptr()); + let loaded = svldnf1uh_u32(svptrue_b16(), U16_DATA.as_ptr()); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1ub_u64() { + svsetffr(); + let _ = svld1ub_u64(svptrue_b8(), U8_DATA.as_ptr()); + let loaded = svldnf1ub_u64(svptrue_b8(), U8_DATA.as_ptr()); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1uh_u64() { + svsetffr(); + let _ = svld1uh_u64(svptrue_b16(), U16_DATA.as_ptr()); + let loaded = svldnf1uh_u64(svptrue_b16(), U16_DATA.as_ptr()); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1uw_u64() { + svsetffr(); + let _ = svld1uw_u64(svptrue_b32(), U32_DATA.as_ptr()); + let loaded = svldnf1uw_u64(svptrue_b32(), U32_DATA.as_ptr()); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1ub_vnum_s16() { + svsetffr(); + let _ = svld1ub_vnum_s16(svptrue_b8(), U8_DATA.as_ptr(), 1); + let loaded = svldnf1ub_vnum_s16(svptrue_b8(), U8_DATA.as_ptr(), 1); + let len = svcnth() as usize; + assert_vector_matches_i16( + loaded, + svindex_s16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1ub_vnum_s32() { + svsetffr(); + let _ = svld1ub_vnum_s32(svptrue_b8(), U8_DATA.as_ptr(), 1); + let loaded = svldnf1ub_vnum_s32(svptrue_b8(), U8_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_i32( + loaded, + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1uh_vnum_s32() { + svsetffr(); + let _ = svld1uh_vnum_s32(svptrue_b16(), U16_DATA.as_ptr(), 1); + let loaded = svldnf1uh_vnum_s32(svptrue_b16(), U16_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_i32( + loaded, + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1ub_vnum_s64() { + svsetffr(); + let _ = svld1ub_vnum_s64(svptrue_b8(), U8_DATA.as_ptr(), 1); + let loaded = svldnf1ub_vnum_s64(svptrue_b8(), U8_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1uh_vnum_s64() { + svsetffr(); + let _ = svld1uh_vnum_s64(svptrue_b16(), U16_DATA.as_ptr(), 1); + let loaded = svldnf1uh_vnum_s64(svptrue_b16(), U16_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1uw_vnum_s64() { + svsetffr(); + let _ = svld1uw_vnum_s64(svptrue_b32(), U32_DATA.as_ptr(), 1); + let loaded = svldnf1uw_vnum_s64(svptrue_b32(), U32_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1ub_vnum_u16() { + svsetffr(); + let _ = svld1ub_vnum_u16(svptrue_b8(), U8_DATA.as_ptr(), 1); + let loaded = svldnf1ub_vnum_u16(svptrue_b8(), U8_DATA.as_ptr(), 1); + let len = svcnth() as usize; + assert_vector_matches_u16( + loaded, + svindex_u16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1ub_vnum_u32() { + svsetffr(); + let _ = svld1ub_vnum_u32(svptrue_b8(), U8_DATA.as_ptr(), 1); + let loaded = svldnf1ub_vnum_u32(svptrue_b8(), U8_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_u32( + loaded, + svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1uh_vnum_u32() { + svsetffr(); + let _ = svld1uh_vnum_u32(svptrue_b16(), U16_DATA.as_ptr(), 1); + let loaded = svldnf1uh_vnum_u32(svptrue_b16(), U16_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_u32( + loaded, + svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1ub_vnum_u64() { + svsetffr(); + let _ = svld1ub_vnum_u64(svptrue_b8(), U8_DATA.as_ptr(), 1); + let loaded = svldnf1ub_vnum_u64(svptrue_b8(), U8_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1uh_vnum_u64() { + svsetffr(); + let _ = svld1uh_vnum_u64(svptrue_b16(), U16_DATA.as_ptr(), 1); + let loaded = svldnf1uh_vnum_u64(svptrue_b16(), U16_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnf1uw_vnum_u64() { + svsetffr(); + let _ = svld1uw_vnum_u64(svptrue_b32(), U32_DATA.as_ptr(), 1); + let loaded = svldnf1uw_vnum_u64(svptrue_b32(), U32_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_f32_with_svstnt1_f32() { + let mut storage = [0 as f32; 320usize]; + let data = svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + svstnt1_f32(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = svldnt1_f32(svptrue_b32(), storage.as_ptr() as *const f32); + assert_vector_matches_f32( + loaded, + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_f64_with_svstnt1_f64() { + let mut storage = [0 as f64; 160usize]; + let data = svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + svstnt1_f64(svptrue_b64(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svldnt1_f64(svptrue_b64(), storage.as_ptr() as *const f64); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_s8_with_svstnt1_s8() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s8((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svstnt1_s8(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svldnt1_s8(svptrue_b8(), storage.as_ptr() as *const i8); + assert_vector_matches_i8( + loaded, + svindex_s8((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_s16_with_svstnt1_s16() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svstnt1_s16(svptrue_b16(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svldnt1_s16(svptrue_b16(), storage.as_ptr() as *const i16); + assert_vector_matches_i16( + loaded, + svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_s32_with_svstnt1_s32() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svstnt1_s32(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svldnt1_s32(svptrue_b32(), storage.as_ptr() as *const i32); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_s64_with_svstnt1_s64() { + let mut storage = [0 as i64; 160usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svstnt1_s64(svptrue_b64(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svldnt1_s64(svptrue_b64(), storage.as_ptr() as *const i64); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_u8_with_svstnt1_u8() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u8((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svstnt1_u8(svptrue_b8(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svldnt1_u8(svptrue_b8(), storage.as_ptr() as *const u8); + assert_vector_matches_u8( + loaded, + svindex_u8((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_u16_with_svstnt1_u16() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svstnt1_u16(svptrue_b16(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = svldnt1_u16(svptrue_b16(), storage.as_ptr() as *const u16); + assert_vector_matches_u16( + loaded, + svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_u32_with_svstnt1_u32() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svstnt1_u32(svptrue_b32(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svldnt1_u32(svptrue_b32(), storage.as_ptr() as *const u32); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_u64_with_svstnt1_u64() { + let mut storage = [0 as u64; 160usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + svstnt1_u64(svptrue_b64(), storage.as_mut_ptr(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svldnt1_u64(svptrue_b64(), storage.as_ptr() as *const u64); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_vnum_f32_with_svstnt1_vnum_f32() { + let len = svcntw() as usize; + let mut storage = [0 as f32; 320usize]; + let data = svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); + svstnt1_vnum_f32(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = svldnt1_vnum_f32(svptrue_b32(), storage.as_ptr() as *const f32, 1); + assert_vector_matches_f32( + loaded, + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_vnum_f64_with_svstnt1_vnum_f64() { + let len = svcntd() as usize; + let mut storage = [0 as f64; 160usize]; + let data = svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); + svstnt1_vnum_f64(svptrue_b64(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svldnt1_vnum_f64(svptrue_b64(), storage.as_ptr() as *const f64, 1); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_vnum_s8_with_svstnt1_vnum_s8() { + let len = svcntb() as usize; + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s8( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svstnt1_vnum_s8(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svldnt1_vnum_s8(svptrue_b8(), storage.as_ptr() as *const i8, 1); + assert_vector_matches_i8( + loaded, + svindex_s8( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_vnum_s16_with_svstnt1_vnum_s16() { + let len = svcnth() as usize; + let mut storage = [0 as i16; 640usize]; + let data = svindex_s16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svstnt1_vnum_s16(svptrue_b16(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svldnt1_vnum_s16(svptrue_b16(), storage.as_ptr() as *const i16, 1); + assert_vector_matches_i16( + loaded, + svindex_s16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_vnum_s32_with_svstnt1_vnum_s32() { + let len = svcntw() as usize; + let mut storage = [0 as i32; 320usize]; + let data = svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svstnt1_vnum_s32(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svldnt1_vnum_s32(svptrue_b32(), storage.as_ptr() as *const i32, 1); + assert_vector_matches_i32( + loaded, + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_vnum_s64_with_svstnt1_vnum_s64() { + let len = svcntd() as usize; + let mut storage = [0 as i64; 160usize]; + let data = svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svstnt1_vnum_s64(svptrue_b64(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svldnt1_vnum_s64(svptrue_b64(), storage.as_ptr() as *const i64, 1); + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_vnum_u8_with_svstnt1_vnum_u8() { + let len = svcntb() as usize; + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u8( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svstnt1_vnum_u8(svptrue_b8(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = svldnt1_vnum_u8(svptrue_b8(), storage.as_ptr() as *const u8, 1); + assert_vector_matches_u8( + loaded, + svindex_u8( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_vnum_u16_with_svstnt1_vnum_u16() { + let len = svcnth() as usize; + let mut storage = [0 as u16; 640usize]; + let data = svindex_u16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svstnt1_vnum_u16(svptrue_b16(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = svldnt1_vnum_u16(svptrue_b16(), storage.as_ptr() as *const u16, 1); + assert_vector_matches_u16( + loaded, + svindex_u16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_vnum_u32_with_svstnt1_vnum_u32() { + let len = svcntw() as usize; + let mut storage = [0 as u32; 320usize]; + let data = svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svstnt1_vnum_u32(svptrue_b32(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svldnt1_vnum_u32(svptrue_b32(), storage.as_ptr() as *const u32, 1); + assert_vector_matches_u32( + loaded, + svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldnt1_vnum_u64_with_svstnt1_vnum_u64() { + let len = svcntd() as usize; + let mut storage = [0 as u64; 160usize]; + let data = svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ); + svstnt1_vnum_u64(svptrue_b64(), storage.as_mut_ptr(), 1, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svldnt1_vnum_u64(svptrue_b64(), storage.as_ptr() as *const u64, 1); + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfb() { + svsetffr(); + let loaded = svprfb::<{ svprfop::SV_PLDL1KEEP }, i64>(svptrue_b8(), I64_DATA.as_ptr()); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfh() { + svsetffr(); + let loaded = svprfh::<{ svprfop::SV_PLDL1KEEP }, i64>(svptrue_b16(), I64_DATA.as_ptr()); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfw() { + svsetffr(); + let loaded = svprfw::<{ svprfop::SV_PLDL1KEEP }, i64>(svptrue_b32(), I64_DATA.as_ptr()); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfd() { + svsetffr(); + let loaded = svprfd::<{ svprfop::SV_PLDL1KEEP }, i64>(svptrue_b64(), I64_DATA.as_ptr()); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfb_gather_s32offset() { + let offsets = svindex_s32(0, 4u32.try_into().unwrap()); + svsetffr(); + let loaded = svprfb_gather_s32offset::<{ svprfop::SV_PLDL1KEEP }, i64>( + svptrue_b32(), + I64_DATA.as_ptr(), + offsets, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfh_gather_s32index() { + let indices = svindex_s32(0, 1); + svsetffr(); + let loaded = svprfh_gather_s32index::<{ svprfop::SV_PLDL1KEEP }, i64>( + svptrue_b32(), + I64_DATA.as_ptr(), + indices, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfw_gather_s32index() { + let indices = svindex_s32(0, 1); + svsetffr(); + let loaded = svprfw_gather_s32index::<{ svprfop::SV_PLDL1KEEP }, i64>( + svptrue_b32(), + I64_DATA.as_ptr(), + indices, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfd_gather_s32index() { + let indices = svindex_s32(0, 1); + svsetffr(); + let loaded = svprfd_gather_s32index::<{ svprfop::SV_PLDL1KEEP }, i64>( + svptrue_b32(), + I64_DATA.as_ptr(), + indices, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfb_gather_s64offset() { + let offsets = svindex_s64(0, 8u32.try_into().unwrap()); + svsetffr(); + let loaded = svprfb_gather_s64offset::<{ svprfop::SV_PLDL1KEEP }, i64>( + svptrue_b64(), + I64_DATA.as_ptr(), + offsets, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfh_gather_s64index() { + let indices = svindex_s64(0, 1); + svsetffr(); + let loaded = svprfh_gather_s64index::<{ svprfop::SV_PLDL1KEEP }, i64>( + svptrue_b64(), + I64_DATA.as_ptr(), + indices, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfw_gather_s64index() { + let indices = svindex_s64(0, 1); + svsetffr(); + let loaded = svprfw_gather_s64index::<{ svprfop::SV_PLDL1KEEP }, i64>( + svptrue_b64(), + I64_DATA.as_ptr(), + indices, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfd_gather_s64index() { + let indices = svindex_s64(0, 1); + svsetffr(); + let loaded = svprfd_gather_s64index::<{ svprfop::SV_PLDL1KEEP }, i64>( + svptrue_b64(), + I64_DATA.as_ptr(), + indices, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfb_gather_u32offset() { + let offsets = svindex_u32(0, 4u32.try_into().unwrap()); + svsetffr(); + let loaded = svprfb_gather_u32offset::<{ svprfop::SV_PLDL1KEEP }, i64>( + svptrue_b32(), + I64_DATA.as_ptr(), + offsets, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfh_gather_u32index() { + let indices = svindex_u32(0, 1); + svsetffr(); + let loaded = svprfh_gather_u32index::<{ svprfop::SV_PLDL1KEEP }, i64>( + svptrue_b32(), + I64_DATA.as_ptr(), + indices, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfw_gather_u32index() { + let indices = svindex_u32(0, 1); + svsetffr(); + let loaded = svprfw_gather_u32index::<{ svprfop::SV_PLDL1KEEP }, i64>( + svptrue_b32(), + I64_DATA.as_ptr(), + indices, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfd_gather_u32index() { + let indices = svindex_u32(0, 1); + svsetffr(); + let loaded = svprfd_gather_u32index::<{ svprfop::SV_PLDL1KEEP }, i64>( + svptrue_b32(), + I64_DATA.as_ptr(), + indices, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfb_gather_u64offset() { + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + svsetffr(); + let loaded = svprfb_gather_u64offset::<{ svprfop::SV_PLDL1KEEP }, i64>( + svptrue_b64(), + I64_DATA.as_ptr(), + offsets, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfh_gather_u64index() { + let indices = svindex_u64(0, 1); + svsetffr(); + let loaded = svprfh_gather_u64index::<{ svprfop::SV_PLDL1KEEP }, i64>( + svptrue_b64(), + I64_DATA.as_ptr(), + indices, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfw_gather_u64index() { + let indices = svindex_u64(0, 1); + svsetffr(); + let loaded = svprfw_gather_u64index::<{ svprfop::SV_PLDL1KEEP }, i64>( + svptrue_b64(), + I64_DATA.as_ptr(), + indices, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfd_gather_u64index() { + let indices = svindex_u64(0, 1); + svsetffr(); + let loaded = svprfd_gather_u64index::<{ svprfop::SV_PLDL1KEEP }, i64>( + svptrue_b64(), + I64_DATA.as_ptr(), + indices, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfb_gather_u64base() { + let bases = svdup_n_u64(U64_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svsetffr(); + let loaded = svprfb_gather_u64base::<{ svprfop::SV_PLDL1KEEP }>(svptrue_b64(), bases); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfh_gather_u64base() { + let bases = svdup_n_u64(U64_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svsetffr(); + let loaded = svprfh_gather_u64base::<{ svprfop::SV_PLDL1KEEP }>(svptrue_b64(), bases); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfw_gather_u64base() { + let bases = svdup_n_u64(U64_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svsetffr(); + let loaded = svprfw_gather_u64base::<{ svprfop::SV_PLDL1KEEP }>(svptrue_b64(), bases); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfd_gather_u64base() { + let bases = svdup_n_u64(U64_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svsetffr(); + let loaded = svprfd_gather_u64base::<{ svprfop::SV_PLDL1KEEP }>(svptrue_b64(), bases); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfb_gather_u32base_offset() { + let bases = svindex_u32(0, 4u32.try_into().unwrap()); + svsetffr(); + let loaded = svprfb_gather_u32base_offset::<{ svprfop::SV_PLDL1KEEP }>( + svptrue_b32(), + bases, + U32_DATA.as_ptr() as i64 + 4u32 as i64, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfh_gather_u32base_index() { + let bases = svindex_u32(0, 4u32.try_into().unwrap()); + svsetffr(); + let loaded = svprfh_gather_u32base_index::<{ svprfop::SV_PLDL1KEEP }>( + svptrue_b32(), + bases, + U32_DATA.as_ptr() as i64 / (4u32 as i64) + 1, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfw_gather_u32base_index() { + let bases = svindex_u32(0, 4u32.try_into().unwrap()); + svsetffr(); + let loaded = svprfw_gather_u32base_index::<{ svprfop::SV_PLDL1KEEP }>( + svptrue_b32(), + bases, + U32_DATA.as_ptr() as i64 / (4u32 as i64) + 1, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfd_gather_u32base_index() { + let bases = svindex_u32(0, 4u32.try_into().unwrap()); + svsetffr(); + let loaded = svprfd_gather_u32base_index::<{ svprfop::SV_PLDL1KEEP }>( + svptrue_b32(), + bases, + U32_DATA.as_ptr() as i64 / (4u32 as i64) + 1, + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfb_gather_u64base_offset() { + let bases = svdup_n_u64(U64_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svsetffr(); + let loaded = svprfb_gather_u64base_offset::<{ svprfop::SV_PLDL1KEEP }>( + svptrue_b64(), + bases, + 8u32.try_into().unwrap(), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfh_gather_u64base_index() { + let bases = svdup_n_u64(U64_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svsetffr(); + let loaded = svprfh_gather_u64base_index::<{ svprfop::SV_PLDL1KEEP }>( + svptrue_b64(), + bases, + 1.try_into().unwrap(), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfw_gather_u64base_index() { + let bases = svdup_n_u64(U64_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svsetffr(); + let loaded = svprfw_gather_u64base_index::<{ svprfop::SV_PLDL1KEEP }>( + svptrue_b64(), + bases, + 1.try_into().unwrap(), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfd_gather_u64base_index() { + let bases = svdup_n_u64(U64_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svsetffr(); + let loaded = svprfd_gather_u64base_index::<{ svprfop::SV_PLDL1KEEP }>( + svptrue_b64(), + bases, + 1.try_into().unwrap(), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfb_vnum() { + svsetffr(); + let loaded = svprfb_vnum::<{ svprfop::SV_PLDL1KEEP }, i64>(svptrue_b8(), I64_DATA.as_ptr(), 1); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfh_vnum() { + svsetffr(); + let loaded = svprfh_vnum::<{ svprfop::SV_PLDL1KEEP }, i64>(svptrue_b16(), I64_DATA.as_ptr(), 1); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfw_vnum() { + svsetffr(); + let loaded = svprfw_vnum::<{ svprfop::SV_PLDL1KEEP }, i64>(svptrue_b32(), I64_DATA.as_ptr(), 1); +} +#[simd_test(enable = "sve")] +unsafe fn test_svprfd_vnum() { + svsetffr(); + let loaded = svprfd_vnum::<{ svprfop::SV_PLDL1KEEP }, i64>(svptrue_b64(), I64_DATA.as_ptr(), 1); +} +#[simd_test(enable = "sve")] +unsafe fn test_ffr() { + svsetffr(); + let ffr = svrdffr(); + assert_vector_matches_u8(svdup_n_u8_z(ffr, 1), svindex_u8(1, 0)); + let pred = svdupq_n_b8( + true, false, true, false, true, false, true, false, true, false, true, false, true, false, + true, false, + ); + svwrffr(pred); + let ffr = svrdffr_z(svptrue_b8()); + assert_vector_matches_u8(svdup_n_u8_z(ffr, 1), svdup_n_u8_z(pred, 1)); +} diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve2/generated.rs b/library/stdarch/crates/core_arch/src/aarch64/sve2/generated.rs index 8b137891791f..79be8a88890c 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/sve2/generated.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/sve2/generated.rs @@ -1 +1,23857 @@ +// This code is automatically generated. DO NOT MODIFY. +// +// Instead, modify `crates/stdarch-gen-arm/spec/` and run the following command to re-generate this file: +// +// ``` +// cargo run --bin=stdarch-gen-arm -- crates/stdarch-gen-arm/spec +// ``` +#![allow(improper_ctypes)] +#[cfg(test)] +use stdarch_test::assert_instr; + +use super::*; + +#[doc = "Absolute difference and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saba))] +pub fn svaba_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saba.nxv16i8")] + fn _svaba_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _svaba_s8(op1, op2, op3) } +} +#[doc = "Absolute difference and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saba))] +pub fn svaba_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svaba_s8(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Absolute difference and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saba))] +pub fn svaba_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saba.nxv8i16")] + fn _svaba_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t; + } + unsafe { _svaba_s16(op1, op2, op3) } +} +#[doc = "Absolute difference and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saba))] +pub fn svaba_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svaba_s16(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Absolute difference and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saba))] +pub fn svaba_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saba.nxv4i32")] + fn _svaba_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _svaba_s32(op1, op2, op3) } +} +#[doc = "Absolute difference and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saba))] +pub fn svaba_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svaba_s32(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Absolute difference and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saba))] +pub fn svaba_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saba.nxv2i64")] + fn _svaba_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + unsafe { _svaba_s64(op1, op2, op3) } +} +#[doc = "Absolute difference and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saba))] +pub fn svaba_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svaba_s64(op1, op2, svdup_n_s64(op3)) +} +#[doc = "Absolute difference and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaba))] +pub fn svaba_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaba.nxv16i8")] + fn _svaba_u8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _svaba_u8(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaba))] +pub fn svaba_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svaba_u8(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Absolute difference and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaba))] +pub fn svaba_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaba.nxv8i16")] + fn _svaba_u16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t; + } + unsafe { _svaba_u16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaba))] +pub fn svaba_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svaba_u16(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Absolute difference and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaba))] +pub fn svaba_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaba.nxv4i32")] + fn _svaba_u32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _svaba_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaba))] +pub fn svaba_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svaba_u32(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Absolute difference and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaba))] +pub fn svaba_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaba.nxv2i64")] + fn _svaba_u64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + unsafe { _svaba_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaba))] +pub fn svaba_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svaba_u64(op1, op2, svdup_n_u64(op3)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabalb))] +pub fn svabalb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabalb.nxv8i16")] + fn _svabalb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svabalb_s16(op1, op2, op3) } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabalb))] +pub fn svabalb_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { + svabalb_s16(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabalb))] +pub fn svabalb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabalb.nxv4i32")] + fn _svabalb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svabalb_s32(op1, op2, op3) } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabalb))] +pub fn svabalb_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svabalb_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabalb))] +pub fn svabalb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabalb.nxv2i64")] + fn _svabalb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svabalb_s64(op1, op2, op3) } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabalb))] +pub fn svabalb_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svabalb_s64(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabalb))] +pub fn svabalb_u16(op1: svuint16_t, op2: svuint8_t, op3: svuint8_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabalb.nxv8i16")] + fn _svabalb_u16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svabalb_u16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabalb))] +pub fn svabalb_n_u16(op1: svuint16_t, op2: svuint8_t, op3: u8) -> svuint16_t { + svabalb_u16(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabalb))] +pub fn svabalb_u32(op1: svuint32_t, op2: svuint16_t, op3: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabalb.nxv4i32")] + fn _svabalb_u32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svabalb_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabalb))] +pub fn svabalb_n_u32(op1: svuint32_t, op2: svuint16_t, op3: u16) -> svuint32_t { + svabalb_u32(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabalb))] +pub fn svabalb_u64(op1: svuint64_t, op2: svuint32_t, op3: svuint32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabalb.nxv2i64")] + fn _svabalb_u64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svabalb_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabalb))] +pub fn svabalb_n_u64(op1: svuint64_t, op2: svuint32_t, op3: u32) -> svuint64_t { + svabalb_u64(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabalt))] +pub fn svabalt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabalt.nxv8i16")] + fn _svabalt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svabalt_s16(op1, op2, op3) } +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabalt))] +pub fn svabalt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { + svabalt_s16(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabalt))] +pub fn svabalt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabalt.nxv4i32")] + fn _svabalt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svabalt_s32(op1, op2, op3) } +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabalt))] +pub fn svabalt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svabalt_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabalt))] +pub fn svabalt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabalt.nxv2i64")] + fn _svabalt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svabalt_s64(op1, op2, op3) } +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabalt))] +pub fn svabalt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svabalt_s64(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabalt))] +pub fn svabalt_u16(op1: svuint16_t, op2: svuint8_t, op3: svuint8_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabalt.nxv8i16")] + fn _svabalt_u16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svabalt_u16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabalt))] +pub fn svabalt_n_u16(op1: svuint16_t, op2: svuint8_t, op3: u8) -> svuint16_t { + svabalt_u16(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabalt))] +pub fn svabalt_u32(op1: svuint32_t, op2: svuint16_t, op3: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabalt.nxv4i32")] + fn _svabalt_u32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svabalt_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabalt))] +pub fn svabalt_n_u32(op1: svuint32_t, op2: svuint16_t, op3: u16) -> svuint32_t { + svabalt_u32(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabalt))] +pub fn svabalt_u64(op1: svuint64_t, op2: svuint32_t, op3: svuint32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabalt.nxv2i64")] + fn _svabalt_u64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svabalt_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabalt))] +pub fn svabalt_n_u64(op1: svuint64_t, op2: svuint32_t, op3: u32) -> svuint64_t { + svabalt_u64(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabdlb))] +pub fn svabdlb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabdlb.nxv8i16")] + fn _svabdlb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svabdlb_s16(op1, op2) } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabdlb))] +pub fn svabdlb_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svabdlb_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabdlb))] +pub fn svabdlb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabdlb.nxv4i32")] + fn _svabdlb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svabdlb_s32(op1, op2) } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabdlb))] +pub fn svabdlb_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svabdlb_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabdlb))] +pub fn svabdlb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabdlb.nxv2i64")] + fn _svabdlb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svabdlb_s64(op1, op2) } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabdlb))] +pub fn svabdlb_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svabdlb_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabdlb))] +pub fn svabdlb_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabdlb.nxv8i16")] + fn _svabdlb_u16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svabdlb_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabdlb))] +pub fn svabdlb_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { + svabdlb_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabdlb))] +pub fn svabdlb_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabdlb.nxv4i32")] + fn _svabdlb_u32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svabdlb_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabdlb))] +pub fn svabdlb_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t { + svabdlb_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabdlb))] +pub fn svabdlb_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabdlb.nxv2i64")] + fn _svabdlb_u64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svabdlb_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabdlb))] +pub fn svabdlb_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { + svabdlb_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabdlt))] +pub fn svabdlt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabdlt.nxv8i16")] + fn _svabdlt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svabdlt_s16(op1, op2) } +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabdlt))] +pub fn svabdlt_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svabdlt_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabdlt))] +pub fn svabdlt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabdlt.nxv4i32")] + fn _svabdlt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svabdlt_s32(op1, op2) } +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabdlt))] +pub fn svabdlt_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svabdlt_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabdlt))] +pub fn svabdlt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabdlt.nxv2i64")] + fn _svabdlt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svabdlt_s64(op1, op2) } +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sabdlt))] +pub fn svabdlt_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svabdlt_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabdlt))] +pub fn svabdlt_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabdlt.nxv8i16")] + fn _svabdlt_u16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svabdlt_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabdlt))] +pub fn svabdlt_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { + svabdlt_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabdlt))] +pub fn svabdlt_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabdlt.nxv4i32")] + fn _svabdlt_u32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svabdlt_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabdlt))] +pub fn svabdlt_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t { + svabdlt_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabdlt))] +pub fn svabdlt_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabdlt.nxv2i64")] + fn _svabdlt_u64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svabdlt_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Absolute difference long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uabdlt))] +pub fn svabdlt_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { + svabdlt_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sadalp))] +pub fn svadalp_s16_m(pg: svbool_t, op1: svint16_t, op2: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sadalp.nxv8i16")] + fn _svadalp_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svadalp_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Add and accumulate long pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sadalp))] +pub fn svadalp_s16_x(pg: svbool_t, op1: svint16_t, op2: svint8_t) -> svint16_t { + svadalp_s16_m(pg, op1, op2) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sadalp))] +pub fn svadalp_s16_z(pg: svbool_t, op1: svint16_t, op2: svint8_t) -> svint16_t { + svadalp_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sadalp))] +pub fn svadalp_s32_m(pg: svbool_t, op1: svint32_t, op2: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sadalp.nxv4i32")] + fn _svadalp_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svadalp_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Add and accumulate long pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sadalp))] +pub fn svadalp_s32_x(pg: svbool_t, op1: svint32_t, op2: svint16_t) -> svint32_t { + svadalp_s32_m(pg, op1, op2) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sadalp))] +pub fn svadalp_s32_z(pg: svbool_t, op1: svint32_t, op2: svint16_t) -> svint32_t { + svadalp_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sadalp))] +pub fn svadalp_s64_m(pg: svbool_t, op1: svint64_t, op2: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sadalp.nxv2i64")] + fn _svadalp_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svadalp_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Add and accumulate long pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sadalp))] +pub fn svadalp_s64_x(pg: svbool_t, op1: svint64_t, op2: svint32_t) -> svint64_t { + svadalp_s64_m(pg, op1, op2) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sadalp))] +pub fn svadalp_s64_z(pg: svbool_t, op1: svint64_t, op2: svint32_t) -> svint64_t { + svadalp_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uadalp))] +pub fn svadalp_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uadalp.nxv8i16")] + fn _svadalp_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svadalp_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add and accumulate long pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uadalp))] +pub fn svadalp_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint8_t) -> svuint16_t { + svadalp_u16_m(pg, op1, op2) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uadalp))] +pub fn svadalp_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint8_t) -> svuint16_t { + svadalp_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uadalp))] +pub fn svadalp_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uadalp.nxv4i32")] + fn _svadalp_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svadalp_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add and accumulate long pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uadalp))] +pub fn svadalp_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint16_t) -> svuint32_t { + svadalp_u32_m(pg, op1, op2) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uadalp))] +pub fn svadalp_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint16_t) -> svuint32_t { + svadalp_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uadalp))] +pub fn svadalp_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uadalp.nxv2i64")] + fn _svadalp_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svadalp_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add and accumulate long pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uadalp))] +pub fn svadalp_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint32_t) -> svuint64_t { + svadalp_u64_m(pg, op1, op2) +} +#[doc = "Add and accumulate long pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uadalp))] +pub fn svadalp_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint32_t) -> svuint64_t { + svadalp_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Add with carry long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adclb))] +pub fn svadclb_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adclb.nxv4i32")] + fn _svadclb_u32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _svadclb_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Add with carry long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adclb))] +pub fn svadclb_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svadclb_u32(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Add with carry long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adclb))] +pub fn svadclb_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adclb.nxv2i64")] + fn _svadclb_u64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + unsafe { _svadclb_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Add with carry long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adclb))] +pub fn svadclb_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svadclb_u64(op1, op2, svdup_n_u64(op3)) +} +#[doc = "Add with carry long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclt[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adclt))] +pub fn svadclt_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adclt.nxv4i32")] + fn _svadclt_u32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _svadclt_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Add with carry long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adclt))] +pub fn svadclt_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svadclt_u32(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Add with carry long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adclt))] +pub fn svadclt_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adclt.nxv2i64")] + fn _svadclt_u64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + unsafe { _svadclt_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Add with carry long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(adclt))] +pub fn svadclt_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svadclt_u64(op1, op2, svdup_n_u64(op3)) +} +#[doc = "Add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addhnb.nxv8i16")] + fn _svaddhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t; + } + unsafe { _svaddhnb_s16(op1, op2) } +} +#[doc = "Add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_n_s16(op1: svint16_t, op2: i16) -> svint8_t { + svaddhnb_s16(op1, svdup_n_s16(op2)) +} +#[doc = "Add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addhnb.nxv4i32")] + fn _svaddhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t; + } + unsafe { _svaddhnb_s32(op1, op2) } +} +#[doc = "Add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_n_s32(op1: svint32_t, op2: i32) -> svint16_t { + svaddhnb_s32(op1, svdup_n_s32(op2)) +} +#[doc = "Add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addhnb.nxv2i64")] + fn _svaddhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t; + } + unsafe { _svaddhnb_s64(op1, op2) } +} +#[doc = "Add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_n_s64(op1: svint64_t, op2: i64) -> svint32_t { + svaddhnb_s64(op1, svdup_n_s64(op2)) +} +#[doc = "Add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_u16(op1: svuint16_t, op2: svuint16_t) -> svuint8_t { + unsafe { svaddhnb_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_n_u16(op1: svuint16_t, op2: u16) -> svuint8_t { + svaddhnb_u16(op1, svdup_n_u16(op2)) +} +#[doc = "Add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_u32(op1: svuint32_t, op2: svuint32_t) -> svuint16_t { + unsafe { svaddhnb_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_n_u32(op1: svuint32_t, op2: u32) -> svuint16_t { + svaddhnb_u32(op1, svdup_n_u32(op2)) +} +#[doc = "Add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_u64(op1: svuint64_t, op2: svuint64_t) -> svuint32_t { + unsafe { svaddhnb_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnb))] +pub fn svaddhnb_n_u64(op1: svuint64_t, op2: u64) -> svuint32_t { + svaddhnb_u64(op1, svdup_n_u64(op2)) +} +#[doc = "Add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addhnt.nxv8i16")] + fn _svaddhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t; + } + unsafe { _svaddhnt_s16(even, op1, op2) } +} +#[doc = "Add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_n_s16(even: svint8_t, op1: svint16_t, op2: i16) -> svint8_t { + svaddhnt_s16(even, op1, svdup_n_s16(op2)) +} +#[doc = "Add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addhnt.nxv4i32")] + fn _svaddhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t; + } + unsafe { _svaddhnt_s32(even, op1, op2) } +} +#[doc = "Add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_n_s32(even: svint16_t, op1: svint32_t, op2: i32) -> svint16_t { + svaddhnt_s32(even, op1, svdup_n_s32(op2)) +} +#[doc = "Add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addhnt.nxv2i64")] + fn _svaddhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_t; + } + unsafe { _svaddhnt_s64(even, op1, op2) } +} +#[doc = "Add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_n_s64(even: svint32_t, op1: svint64_t, op2: i64) -> svint32_t { + svaddhnt_s64(even, op1, svdup_n_s64(op2)) +} +#[doc = "Add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_u16(even: svuint8_t, op1: svuint16_t, op2: svuint16_t) -> svuint8_t { + unsafe { svaddhnt_s16(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_n_u16(even: svuint8_t, op1: svuint16_t, op2: u16) -> svuint8_t { + svaddhnt_u16(even, op1, svdup_n_u16(op2)) +} +#[doc = "Add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_u32(even: svuint16_t, op1: svuint32_t, op2: svuint32_t) -> svuint16_t { + unsafe { svaddhnt_s32(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_n_u32(even: svuint16_t, op1: svuint32_t, op2: u32) -> svuint16_t { + svaddhnt_u32(even, op1, svdup_n_u32(op2)) +} +#[doc = "Add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_u64(even: svuint32_t, op1: svuint64_t, op2: svuint64_t) -> svuint32_t { + unsafe { svaddhnt_s64(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addhnt))] +pub fn svaddhnt_n_u64(even: svuint32_t, op1: svuint64_t, op2: u64) -> svuint32_t { + svaddhnt_u64(even, op1, svdup_n_u64(op2)) +} +#[doc = "Add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddlb))] +pub fn svaddlb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddlb.nxv8i16")] + fn _svaddlb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svaddlb_s16(op1, op2) } +} +#[doc = "Add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddlb))] +pub fn svaddlb_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svaddlb_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddlb))] +pub fn svaddlb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddlb.nxv4i32")] + fn _svaddlb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svaddlb_s32(op1, op2) } +} +#[doc = "Add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddlb))] +pub fn svaddlb_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svaddlb_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddlb))] +pub fn svaddlb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddlb.nxv2i64")] + fn _svaddlb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svaddlb_s64(op1, op2) } +} +#[doc = "Add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddlb))] +pub fn svaddlb_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svaddlb_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddlb))] +pub fn svaddlb_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddlb.nxv8i16")] + fn _svaddlb_u16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svaddlb_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddlb))] +pub fn svaddlb_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { + svaddlb_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddlb))] +pub fn svaddlb_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddlb.nxv4i32")] + fn _svaddlb_u32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svaddlb_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddlb))] +pub fn svaddlb_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t { + svaddlb_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddlb))] +pub fn svaddlb_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddlb.nxv2i64")] + fn _svaddlb_u64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svaddlb_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddlb))] +pub fn svaddlb_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { + svaddlb_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Add long (bottom + top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlbt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddlbt))] +pub fn svaddlbt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.saddlbt.nxv8i16" + )] + fn _svaddlbt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svaddlbt_s16(op1, op2) } +} +#[doc = "Add long (bottom + top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlbt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddlbt))] +pub fn svaddlbt_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svaddlbt_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Add long (bottom + top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlbt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddlbt))] +pub fn svaddlbt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.saddlbt.nxv4i32" + )] + fn _svaddlbt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svaddlbt_s32(op1, op2) } +} +#[doc = "Add long (bottom + top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlbt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddlbt))] +pub fn svaddlbt_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svaddlbt_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Add long (bottom + top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlbt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddlbt))] +pub fn svaddlbt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.saddlbt.nxv2i64" + )] + fn _svaddlbt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svaddlbt_s64(op1, op2) } +} +#[doc = "Add long (bottom + top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlbt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddlbt))] +pub fn svaddlbt_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svaddlbt_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddlt))] +pub fn svaddlt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddlt.nxv8i16")] + fn _svaddlt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svaddlt_s16(op1, op2) } +} +#[doc = "Add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddlt))] +pub fn svaddlt_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svaddlt_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddlt))] +pub fn svaddlt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddlt.nxv4i32")] + fn _svaddlt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svaddlt_s32(op1, op2) } +} +#[doc = "Add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddlt))] +pub fn svaddlt_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svaddlt_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddlt))] +pub fn svaddlt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddlt.nxv2i64")] + fn _svaddlt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svaddlt_s64(op1, op2) } +} +#[doc = "Add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddlt))] +pub fn svaddlt_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svaddlt_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddlt))] +pub fn svaddlt_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddlt.nxv8i16")] + fn _svaddlt_u16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svaddlt_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddlt))] +pub fn svaddlt_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { + svaddlt_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddlt))] +pub fn svaddlt_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddlt.nxv4i32")] + fn _svaddlt_u32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svaddlt_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddlt))] +pub fn svaddlt_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t { + svaddlt_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddlt))] +pub fn svaddlt_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddlt.nxv2i64")] + fn _svaddlt_u64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svaddlt_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddlt))] +pub fn svaddlt_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { + svaddlt_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(faddp))] +pub fn svaddp_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.faddp.nxv4f32")] + fn _svaddp_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svaddp_f32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(faddp))] +pub fn svaddp_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svaddp_f32_m(pg, op1, op2) +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(faddp))] +pub fn svaddp_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.faddp.nxv2f64")] + fn _svaddp_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svaddp_f64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(faddp))] +pub fn svaddp_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svaddp_f64_m(pg, op1, op2) +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addp.nxv16i8")] + fn _svaddp_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svaddp_s8_m(pg, op1, op2) } +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svaddp_s8_m(pg, op1, op2) +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addp.nxv8i16")] + fn _svaddp_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svaddp_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svaddp_s16_m(pg, op1, op2) +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addp.nxv4i32")] + fn _svaddp_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svaddp_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svaddp_s32_m(pg, op1, op2) +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addp.nxv2i64")] + fn _svaddp_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svaddp_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svaddp_s64_m(pg, op1, op2) +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svaddp_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svaddp_u8_m(pg, op1, op2) +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { svaddp_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svaddp_u16_m(pg, op1, op2) +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svaddp_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svaddp_u32_m(pg, op1, op2) +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svaddp_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(addp))] +pub fn svaddp_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svaddp_u64_m(pg, op1, op2) +} +#[doc = "Add wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddwb))] +pub fn svaddwb_s16(op1: svint16_t, op2: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddwb.nxv8i16")] + fn _svaddwb_s16(op1: svint16_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svaddwb_s16(op1, op2) } +} +#[doc = "Add wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddwb))] +pub fn svaddwb_n_s16(op1: svint16_t, op2: i8) -> svint16_t { + svaddwb_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Add wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddwb))] +pub fn svaddwb_s32(op1: svint32_t, op2: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddwb.nxv4i32")] + fn _svaddwb_s32(op1: svint32_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svaddwb_s32(op1, op2) } +} +#[doc = "Add wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddwb))] +pub fn svaddwb_n_s32(op1: svint32_t, op2: i16) -> svint32_t { + svaddwb_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Add wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddwb))] +pub fn svaddwb_s64(op1: svint64_t, op2: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddwb.nxv2i64")] + fn _svaddwb_s64(op1: svint64_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svaddwb_s64(op1, op2) } +} +#[doc = "Add wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddwb))] +pub fn svaddwb_n_s64(op1: svint64_t, op2: i32) -> svint64_t { + svaddwb_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Add wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddwb))] +pub fn svaddwb_u16(op1: svuint16_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddwb.nxv8i16")] + fn _svaddwb_u16(op1: svint16_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svaddwb_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddwb))] +pub fn svaddwb_n_u16(op1: svuint16_t, op2: u8) -> svuint16_t { + svaddwb_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Add wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddwb))] +pub fn svaddwb_u32(op1: svuint32_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddwb.nxv4i32")] + fn _svaddwb_u32(op1: svint32_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svaddwb_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddwb))] +pub fn svaddwb_n_u32(op1: svuint32_t, op2: u16) -> svuint32_t { + svaddwb_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Add wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddwb))] +pub fn svaddwb_u64(op1: svuint64_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddwb.nxv2i64")] + fn _svaddwb_u64(op1: svint64_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svaddwb_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddwb))] +pub fn svaddwb_n_u64(op1: svuint64_t, op2: u32) -> svuint64_t { + svaddwb_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Add wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddwt))] +pub fn svaddwt_s16(op1: svint16_t, op2: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddwt.nxv8i16")] + fn _svaddwt_s16(op1: svint16_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svaddwt_s16(op1, op2) } +} +#[doc = "Add wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddwt))] +pub fn svaddwt_n_s16(op1: svint16_t, op2: i8) -> svint16_t { + svaddwt_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Add wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddwt))] +pub fn svaddwt_s32(op1: svint32_t, op2: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddwt.nxv4i32")] + fn _svaddwt_s32(op1: svint32_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svaddwt_s32(op1, op2) } +} +#[doc = "Add wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddwt))] +pub fn svaddwt_n_s32(op1: svint32_t, op2: i16) -> svint32_t { + svaddwt_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Add wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddwt))] +pub fn svaddwt_s64(op1: svint64_t, op2: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddwt.nxv2i64")] + fn _svaddwt_s64(op1: svint64_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svaddwt_s64(op1, op2) } +} +#[doc = "Add wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(saddwt))] +pub fn svaddwt_n_s64(op1: svint64_t, op2: i32) -> svint64_t { + svaddwt_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Add wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddwt))] +pub fn svaddwt_u16(op1: svuint16_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddwt.nxv8i16")] + fn _svaddwt_u16(op1: svint16_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svaddwt_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddwt))] +pub fn svaddwt_n_u16(op1: svuint16_t, op2: u8) -> svuint16_t { + svaddwt_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Add wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddwt))] +pub fn svaddwt_u32(op1: svuint32_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddwt.nxv4i32")] + fn _svaddwt_u32(op1: svint32_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svaddwt_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddwt))] +pub fn svaddwt_n_u32(op1: svuint32_t, op2: u16) -> svuint32_t { + svaddwt_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Add wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddwt))] +pub fn svaddwt_u64(op1: svuint64_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddwt.nxv2i64")] + fn _svaddwt_u64(op1: svint64_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svaddwt_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Add wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uaddwt))] +pub fn svaddwt_n_u64(op1: svuint64_t, op2: u32) -> svuint64_t { + svaddwt_u64(op1, svdup_n_u32(op2)) +} +#[doc = "AES single round decryption"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaesd[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(aesd))] +pub fn svaesd_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.aesd")] + fn _svaesd_u8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svaesd_u8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "AES single round encryption"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaese[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(aese))] +pub fn svaese_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.aese")] + fn _svaese_u8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svaese_u8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "AES inverse mix columns"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaesimc[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(aesimc))] +pub fn svaesimc_u8(op: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.aesimc")] + fn _svaesimc_u8(op: svint8_t) -> svint8_t; + } + unsafe { _svaesimc_u8(op.as_signed()).as_unsigned() } +} +#[doc = "AES mix columns"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaesmc[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(aesmc))] +pub fn svaesmc_u8(op: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.aesmc")] + fn _svaesmc_u8(op: svint8_t) -> svint8_t; + } + unsafe { _svaesmc_u8(op.as_signed()).as_unsigned() } +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bcax.nxv16i8")] + fn _svbcax_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _svbcax_s8(op1, op2, op3) } +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svbcax_s8(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bcax.nxv8i16")] + fn _svbcax_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t; + } + unsafe { _svbcax_s16(op1, op2, op3) } +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svbcax_s16(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bcax.nxv4i32")] + fn _svbcax_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _svbcax_s32(op1, op2, op3) } +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svbcax_s32(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bcax.nxv2i64")] + fn _svbcax_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + unsafe { _svbcax_s64(op1, op2, op3) } +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svbcax_s64(op1, op2, svdup_n_s64(op3)) +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + unsafe { svbcax_s8(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svbcax_u8(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + unsafe { svbcax_s16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svbcax_u16(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe { svbcax_s32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svbcax_u32(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe { svbcax_s64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise clear and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bcax))] +pub fn svbcax_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svbcax_u64(op1, op2, svdup_n_u64(op3)) +} +#[doc = "Scatter lower bits into positions selected by bitmask"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bdep))] +pub fn svbdep_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bdep.x.nxv16i8")] + fn _svbdep_u8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svbdep_u8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Scatter lower bits into positions selected by bitmask"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bdep))] +pub fn svbdep_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t { + svbdep_u8(op1, svdup_n_u8(op2)) +} +#[doc = "Scatter lower bits into positions selected by bitmask"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bdep))] +pub fn svbdep_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bdep.x.nxv8i16")] + fn _svbdep_u16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svbdep_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Scatter lower bits into positions selected by bitmask"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bdep))] +pub fn svbdep_n_u16(op1: svuint16_t, op2: u16) -> svuint16_t { + svbdep_u16(op1, svdup_n_u16(op2)) +} +#[doc = "Scatter lower bits into positions selected by bitmask"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bdep))] +pub fn svbdep_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bdep.x.nxv4i32")] + fn _svbdep_u32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svbdep_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Scatter lower bits into positions selected by bitmask"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bdep))] +pub fn svbdep_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t { + svbdep_u32(op1, svdup_n_u32(op2)) +} +#[doc = "Scatter lower bits into positions selected by bitmask"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bdep))] +pub fn svbdep_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bdep.x.nxv2i64")] + fn _svbdep_u64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svbdep_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Scatter lower bits into positions selected by bitmask"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bdep))] +pub fn svbdep_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t { + svbdep_u64(op1, svdup_n_u64(op2)) +} +#[doc = "Gather lower bits from positions selected by bitmask"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bext))] +pub fn svbext_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bext.x.nxv16i8")] + fn _svbext_u8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svbext_u8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Gather lower bits from positions selected by bitmask"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bext))] +pub fn svbext_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t { + svbext_u8(op1, svdup_n_u8(op2)) +} +#[doc = "Gather lower bits from positions selected by bitmask"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bext))] +pub fn svbext_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bext.x.nxv8i16")] + fn _svbext_u16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svbext_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Gather lower bits from positions selected by bitmask"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bext))] +pub fn svbext_n_u16(op1: svuint16_t, op2: u16) -> svuint16_t { + svbext_u16(op1, svdup_n_u16(op2)) +} +#[doc = "Gather lower bits from positions selected by bitmask"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bext))] +pub fn svbext_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bext.x.nxv4i32")] + fn _svbext_u32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svbext_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Gather lower bits from positions selected by bitmask"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bext))] +pub fn svbext_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t { + svbext_u32(op1, svdup_n_u32(op2)) +} +#[doc = "Gather lower bits from positions selected by bitmask"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bext))] +pub fn svbext_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bext.x.nxv2i64")] + fn _svbext_u64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svbext_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Gather lower bits from positions selected by bitmask"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bext))] +pub fn svbext_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t { + svbext_u64(op1, svdup_n_u64(op2)) +} +#[doc = "Group bits to right or left as selected by bitmask"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bgrp))] +pub fn svbgrp_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bgrp.x.nxv16i8")] + fn _svbgrp_u8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svbgrp_u8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Group bits to right or left as selected by bitmask"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bgrp))] +pub fn svbgrp_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t { + svbgrp_u8(op1, svdup_n_u8(op2)) +} +#[doc = "Group bits to right or left as selected by bitmask"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bgrp))] +pub fn svbgrp_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bgrp.x.nxv8i16")] + fn _svbgrp_u16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svbgrp_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Group bits to right or left as selected by bitmask"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bgrp))] +pub fn svbgrp_n_u16(op1: svuint16_t, op2: u16) -> svuint16_t { + svbgrp_u16(op1, svdup_n_u16(op2)) +} +#[doc = "Group bits to right or left as selected by bitmask"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bgrp))] +pub fn svbgrp_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bgrp.x.nxv4i32")] + fn _svbgrp_u32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svbgrp_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Group bits to right or left as selected by bitmask"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bgrp))] +pub fn svbgrp_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t { + svbgrp_u32(op1, svdup_n_u32(op2)) +} +#[doc = "Group bits to right or left as selected by bitmask"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bgrp))] +pub fn svbgrp_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bgrp.x.nxv2i64")] + fn _svbgrp_u64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svbgrp_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Group bits to right or left as selected by bitmask"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-bitperm")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bgrp))] +pub fn svbgrp_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t { + svbgrp_u64(op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise select with first input inverted"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl1n))] +pub fn svbsl1n_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl1n.nxv16i8")] + fn _svbsl1n_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _svbsl1n_s8(op1, op2, op3) } +} +#[doc = "Bitwise select with first input inverted"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl1n))] +pub fn svbsl1n_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svbsl1n_s8(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Bitwise select with first input inverted"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl1n))] +pub fn svbsl1n_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl1n.nxv8i16")] + fn _svbsl1n_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t; + } + unsafe { _svbsl1n_s16(op1, op2, op3) } +} +#[doc = "Bitwise select with first input inverted"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl1n))] +pub fn svbsl1n_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svbsl1n_s16(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Bitwise select with first input inverted"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl1n))] +pub fn svbsl1n_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl1n.nxv4i32")] + fn _svbsl1n_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _svbsl1n_s32(op1, op2, op3) } +} +#[doc = "Bitwise select with first input inverted"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl1n))] +pub fn svbsl1n_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svbsl1n_s32(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Bitwise select with first input inverted"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl1n))] +pub fn svbsl1n_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl1n.nxv2i64")] + fn _svbsl1n_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + unsafe { _svbsl1n_s64(op1, op2, op3) } +} +#[doc = "Bitwise select with first input inverted"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl1n))] +pub fn svbsl1n_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svbsl1n_s64(op1, op2, svdup_n_s64(op3)) +} +#[doc = "Bitwise select with first input inverted"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl1n))] +pub fn svbsl1n_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + unsafe { svbsl1n_s8(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select with first input inverted"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl1n))] +pub fn svbsl1n_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svbsl1n_u8(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Bitwise select with first input inverted"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl1n))] +pub fn svbsl1n_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + unsafe { svbsl1n_s16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select with first input inverted"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl1n))] +pub fn svbsl1n_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svbsl1n_u16(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Bitwise select with first input inverted"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl1n))] +pub fn svbsl1n_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe { svbsl1n_s32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select with first input inverted"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl1n))] +pub fn svbsl1n_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svbsl1n_u32(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Bitwise select with first input inverted"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl1n))] +pub fn svbsl1n_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe { svbsl1n_s64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select with first input inverted"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl1n))] +pub fn svbsl1n_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svbsl1n_u64(op1, op2, svdup_n_u64(op3)) +} +#[doc = "Bitwise select with second input inverted"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl2n))] +pub fn svbsl2n_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl2n.nxv16i8")] + fn _svbsl2n_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _svbsl2n_s8(op1, op2, op3) } +} +#[doc = "Bitwise select with second input inverted"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl2n))] +pub fn svbsl2n_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svbsl2n_s8(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Bitwise select with second input inverted"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl2n))] +pub fn svbsl2n_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl2n.nxv8i16")] + fn _svbsl2n_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t; + } + unsafe { _svbsl2n_s16(op1, op2, op3) } +} +#[doc = "Bitwise select with second input inverted"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl2n))] +pub fn svbsl2n_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svbsl2n_s16(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Bitwise select with second input inverted"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl2n))] +pub fn svbsl2n_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl2n.nxv4i32")] + fn _svbsl2n_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _svbsl2n_s32(op1, op2, op3) } +} +#[doc = "Bitwise select with second input inverted"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl2n))] +pub fn svbsl2n_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svbsl2n_s32(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Bitwise select with second input inverted"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl2n))] +pub fn svbsl2n_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl2n.nxv2i64")] + fn _svbsl2n_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + unsafe { _svbsl2n_s64(op1, op2, op3) } +} +#[doc = "Bitwise select with second input inverted"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl2n))] +pub fn svbsl2n_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svbsl2n_s64(op1, op2, svdup_n_s64(op3)) +} +#[doc = "Bitwise select with second input inverted"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl2n))] +pub fn svbsl2n_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + unsafe { svbsl2n_s8(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select with second input inverted"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl2n))] +pub fn svbsl2n_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svbsl2n_u8(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Bitwise select with second input inverted"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl2n))] +pub fn svbsl2n_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + unsafe { svbsl2n_s16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select with second input inverted"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl2n))] +pub fn svbsl2n_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svbsl2n_u16(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Bitwise select with second input inverted"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl2n))] +pub fn svbsl2n_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe { svbsl2n_s32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select with second input inverted"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl2n))] +pub fn svbsl2n_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svbsl2n_u32(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Bitwise select with second input inverted"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl2n))] +pub fn svbsl2n_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe { svbsl2n_s64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select with second input inverted"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl2n))] +pub fn svbsl2n_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svbsl2n_u64(op1, op2, svdup_n_u64(op3)) +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl.nxv16i8")] + fn _svbsl_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _svbsl_s8(op1, op2, op3) } +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svbsl_s8(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl.nxv8i16")] + fn _svbsl_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t; + } + unsafe { _svbsl_s16(op1, op2, op3) } +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svbsl_s16(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl.nxv4i32")] + fn _svbsl_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _svbsl_s32(op1, op2, op3) } +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svbsl_s32(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl.nxv2i64")] + fn _svbsl_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + unsafe { _svbsl_s64(op1, op2, op3) } +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svbsl_s64(op1, op2, svdup_n_s64(op3)) +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + unsafe { svbsl_s8(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svbsl_u8(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + unsafe { svbsl_s16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svbsl_u16(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe { svbsl_s32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svbsl_u32(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe { svbsl_s64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(bsl))] +pub fn svbsl_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svbsl_u64(op1, op2, svdup_n_u64(op3)) +} +#[doc = "Complex add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))] +pub fn svcadd_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cadd.x.nxv16i8")] + fn _svcadd_s8(op1: svint8_t, op2: svint8_t, imm_rotation: i32) -> svint8_t; + } + unsafe { _svcadd_s8(op1, op2, IMM_ROTATION) } +} +#[doc = "Complex add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))] +pub fn svcadd_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cadd.x.nxv8i16")] + fn _svcadd_s16(op1: svint16_t, op2: svint16_t, imm_rotation: i32) -> svint16_t; + } + unsafe { _svcadd_s16(op1, op2, IMM_ROTATION) } +} +#[doc = "Complex add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))] +pub fn svcadd_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cadd.x.nxv4i32")] + fn _svcadd_s32(op1: svint32_t, op2: svint32_t, imm_rotation: i32) -> svint32_t; + } + unsafe { _svcadd_s32(op1, op2, IMM_ROTATION) } +} +#[doc = "Complex add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))] +pub fn svcadd_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cadd.x.nxv2i64")] + fn _svcadd_s64(op1: svint64_t, op2: svint64_t, imm_rotation: i32) -> svint64_t; + } + unsafe { _svcadd_s64(op1, op2, IMM_ROTATION) } +} +#[doc = "Complex add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))] +pub fn svcadd_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); + unsafe { svcadd_s8::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Complex add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))] +pub fn svcadd_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); + unsafe { svcadd_s16::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Complex add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))] +pub fn svcadd_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); + unsafe { svcadd_s32::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Complex add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))] +pub fn svcadd_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); + unsafe { svcadd_s64::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Complex dot product"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcdot_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cdot, IMM_INDEX = 0, IMM_ROTATION = 90))] +pub fn svcdot_lane_s32( + op1: svint32_t, + op2: svint8_t, + op3: svint8_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=3); + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cdot.lane.nxv4i32" + )] + fn _svcdot_lane_s32( + op1: svint32_t, + op2: svint8_t, + op3: svint8_t, + imm_index: i32, + imm_rotation: i32, + ) -> svint32_t; + } + unsafe { _svcdot_lane_s32(op1, op2, op3, IMM_INDEX, IMM_ROTATION) } +} +#[doc = "Complex dot product"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcdot_lane[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cdot, IMM_INDEX = 0, IMM_ROTATION = 90))] +pub fn svcdot_lane_s64( + op1: svint64_t, + op2: svint16_t, + op3: svint16_t, +) -> svint64_t { + static_assert_range!(IMM_INDEX, 0..=1); + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cdot.lane.nxv2i64" + )] + fn _svcdot_lane_s64( + op1: svint64_t, + op2: svint16_t, + op3: svint16_t, + imm_index: i32, + imm_rotation: i32, + ) -> svint64_t; + } + unsafe { _svcdot_lane_s64(op1, op2, op3, IMM_INDEX, IMM_ROTATION) } +} +#[doc = "Complex dot product"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcdot[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cdot, IMM_ROTATION = 90))] +pub fn svcdot_s32( + op1: svint32_t, + op2: svint8_t, + op3: svint8_t, +) -> svint32_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cdot.nxv4i32")] + fn _svcdot_s32( + op1: svint32_t, + op2: svint8_t, + op3: svint8_t, + imm_rotation: i32, + ) -> svint32_t; + } + unsafe { _svcdot_s32(op1, op2, op3, IMM_ROTATION) } +} +#[doc = "Complex dot product"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcdot[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cdot, IMM_ROTATION = 90))] +pub fn svcdot_s64( + op1: svint64_t, + op2: svint16_t, + op3: svint16_t, +) -> svint64_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cdot.nxv2i64")] + fn _svcdot_s64( + op1: svint64_t, + op2: svint16_t, + op3: svint16_t, + imm_rotation: i32, + ) -> svint64_t; + } + unsafe { _svcdot_s64(op1, op2, op3, IMM_ROTATION) } +} +#[doc = "Complex multiply-add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla_lane[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmla, IMM_INDEX = 0, IMM_ROTATION = 90))] +pub fn svcmla_lane_s16( + op1: svint16_t, + op2: svint16_t, + op3: svint16_t, +) -> svint16_t { + static_assert_range!(IMM_INDEX, 0..=3); + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmla.lane.x.nxv8i16" + )] + fn _svcmla_lane_s16( + op1: svint16_t, + op2: svint16_t, + op3: svint16_t, + imm_index: i32, + imm_rotation: i32, + ) -> svint16_t; + } + unsafe { _svcmla_lane_s16(op1, op2, op3, IMM_INDEX, IMM_ROTATION) } +} +#[doc = "Complex multiply-add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmla, IMM_INDEX = 0, IMM_ROTATION = 90))] +pub fn svcmla_lane_s32( + op1: svint32_t, + op2: svint32_t, + op3: svint32_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=1); + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.cmla.lane.x.nxv4i32" + )] + fn _svcmla_lane_s32( + op1: svint32_t, + op2: svint32_t, + op3: svint32_t, + imm_index: i32, + imm_rotation: i32, + ) -> svint32_t; + } + unsafe { _svcmla_lane_s32(op1, op2, op3, IMM_INDEX, IMM_ROTATION) } +} +#[doc = "Complex multiply-add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla_lane[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmla, IMM_INDEX = 0, IMM_ROTATION = 90))] +pub fn svcmla_lane_u16( + op1: svuint16_t, + op2: svuint16_t, + op3: svuint16_t, +) -> svuint16_t { + static_assert_range!(IMM_INDEX, 0..=3); + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe { + svcmla_lane_s16::( + op1.as_signed(), + op2.as_signed(), + op3.as_signed(), + ) + .as_unsigned() + } +} +#[doc = "Complex multiply-add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla_lane[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmla, IMM_INDEX = 0, IMM_ROTATION = 90))] +pub fn svcmla_lane_u32( + op1: svuint32_t, + op2: svuint32_t, + op3: svuint32_t, +) -> svuint32_t { + static_assert_range!(IMM_INDEX, 0..=1); + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe { + svcmla_lane_s32::( + op1.as_signed(), + op2.as_signed(), + op3.as_signed(), + ) + .as_unsigned() + } +} +#[doc = "Complex multiply-add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))] +pub fn svcmla_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmla.x.nxv16i8")] + fn _svcmla_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t, imm_rotation: i32) -> svint8_t; + } + unsafe { _svcmla_s8(op1, op2, op3, IMM_ROTATION) } +} +#[doc = "Complex multiply-add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))] +pub fn svcmla_s16( + op1: svint16_t, + op2: svint16_t, + op3: svint16_t, +) -> svint16_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmla.x.nxv8i16")] + fn _svcmla_s16( + op1: svint16_t, + op2: svint16_t, + op3: svint16_t, + imm_rotation: i32, + ) -> svint16_t; + } + unsafe { _svcmla_s16(op1, op2, op3, IMM_ROTATION) } +} +#[doc = "Complex multiply-add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))] +pub fn svcmla_s32( + op1: svint32_t, + op2: svint32_t, + op3: svint32_t, +) -> svint32_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmla.x.nxv4i32")] + fn _svcmla_s32( + op1: svint32_t, + op2: svint32_t, + op3: svint32_t, + imm_rotation: i32, + ) -> svint32_t; + } + unsafe { _svcmla_s32(op1, op2, op3, IMM_ROTATION) } +} +#[doc = "Complex multiply-add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))] +pub fn svcmla_s64( + op1: svint64_t, + op2: svint64_t, + op3: svint64_t, +) -> svint64_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmla.x.nxv2i64")] + fn _svcmla_s64( + op1: svint64_t, + op2: svint64_t, + op3: svint64_t, + imm_rotation: i32, + ) -> svint64_t; + } + unsafe { _svcmla_s64(op1, op2, op3, IMM_ROTATION) } +} +#[doc = "Complex multiply-add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))] +pub fn svcmla_u8( + op1: svuint8_t, + op2: svuint8_t, + op3: svuint8_t, +) -> svuint8_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe { + svcmla_s8::(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() + } +} +#[doc = "Complex multiply-add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))] +pub fn svcmla_u16( + op1: svuint16_t, + op2: svuint16_t, + op3: svuint16_t, +) -> svuint16_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe { + svcmla_s16::(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() + } +} +#[doc = "Complex multiply-add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))] +pub fn svcmla_u32( + op1: svuint32_t, + op2: svuint32_t, + op3: svuint32_t, +) -> svuint32_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe { + svcmla_s32::(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() + } +} +#[doc = "Complex multiply-add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))] +pub fn svcmla_u64( + op1: svuint64_t, + op2: svuint64_t, + op3: svuint64_t, +) -> svuint64_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe { + svcmla_s64::(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() + } +} +#[doc = "Up convert long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtlt_f64[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtlt))] +pub fn svcvtlt_f64_f32_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat32_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtlt.f64f32")] + fn _svcvtlt_f64_f32_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat32_t) + -> svfloat64_t; + } + unsafe { _svcvtlt_f64_f32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Up convert long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtlt_f64[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtlt))] +pub fn svcvtlt_f64_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat64_t { + unsafe { svcvtlt_f64_f32_m(crate::intrinsics::transmute_unchecked(op), pg, op) } +} +#[doc = "Down convert and narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtnt_f32[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtnt))] +pub fn svcvtnt_f32_f64_m(even: svfloat32_t, pg: svbool_t, op: svfloat64_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtnt.f32f64")] + fn _svcvtnt_f32_f64_m(even: svfloat32_t, pg: svbool2_t, op: svfloat64_t) -> svfloat32_t; + } + unsafe { _svcvtnt_f32_f64_m(even, pg.sve_into(), op) } +} +#[doc = "Down convert and narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtnt_f32[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtnt))] +pub fn svcvtnt_f32_f64_x(even: svfloat32_t, pg: svbool_t, op: svfloat64_t) -> svfloat32_t { + svcvtnt_f32_f64_m(even, pg, op) +} +#[doc = "Down convert, rounding to odd"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtx_f32[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtx))] +pub fn svcvtx_f32_f64_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat64_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtx.f32f64")] + fn _svcvtx_f32_f64_m(inactive: svfloat32_t, pg: svbool2_t, op: svfloat64_t) -> svfloat32_t; + } + unsafe { _svcvtx_f32_f64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Down convert, rounding to odd"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtx_f32[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtx))] +pub fn svcvtx_f32_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat32_t { + unsafe { svcvtx_f32_f64_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Down convert, rounding to odd"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtx_f32[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtx))] +pub fn svcvtx_f32_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat32_t { + svcvtx_f32_f64_m(svdup_n_f32(0.0), pg, op) +} +#[doc = "Down convert, rounding to odd (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtxnt_f32[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtxnt))] +pub fn svcvtxnt_f32_f64_m(even: svfloat32_t, pg: svbool_t, op: svfloat64_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtxnt.f32f64")] + fn _svcvtxnt_f32_f64_m(even: svfloat32_t, pg: svbool2_t, op: svfloat64_t) -> svfloat32_t; + } + unsafe { _svcvtxnt_f32_f64_m(even, pg.sve_into(), op) } +} +#[doc = "Down convert, rounding to odd (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtxnt_f32[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fcvtxnt))] +pub fn svcvtxnt_f32_f64_x(even: svfloat32_t, pg: svbool_t, op: svfloat64_t) -> svfloat32_t { + svcvtxnt_f32_f64_m(even, pg, op) +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor3.nxv16i8")] + fn _sveor3_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _sveor3_s8(op1, op2, op3) } +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + sveor3_s8(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor3.nxv8i16")] + fn _sveor3_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t; + } + unsafe { _sveor3_s16(op1, op2, op3) } +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + sveor3_s16(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor3.nxv4i32")] + fn _sveor3_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _sveor3_s32(op1, op2, op3) } +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + sveor3_s32(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor3.nxv2i64")] + fn _sveor3_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + unsafe { _sveor3_s64(op1, op2, op3) } +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + sveor3_s64(op1, op2, svdup_n_s64(op3)) +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + unsafe { sveor3_s8(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + sveor3_u8(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + unsafe { sveor3_s16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + sveor3_u16(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe { sveor3_s32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + sveor3_u32(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe { sveor3_s64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR of three vectors"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eor3))] +pub fn sveor3_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + sveor3_u64(op1, op2, svdup_n_u64(op3)) +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_s8(odd: svint8_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorbt.nxv16i8")] + fn _sveorbt_s8(odd: svint8_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _sveorbt_s8(odd, op1, op2) } +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_n_s8(odd: svint8_t, op1: svint8_t, op2: i8) -> svint8_t { + sveorbt_s8(odd, op1, svdup_n_s8(op2)) +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_s16(odd: svint16_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorbt.nxv8i16")] + fn _sveorbt_s16(odd: svint16_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _sveorbt_s16(odd, op1, op2) } +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_n_s16(odd: svint16_t, op1: svint16_t, op2: i16) -> svint16_t { + sveorbt_s16(odd, op1, svdup_n_s16(op2)) +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_s32(odd: svint32_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorbt.nxv4i32")] + fn _sveorbt_s32(odd: svint32_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _sveorbt_s32(odd, op1, op2) } +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_n_s32(odd: svint32_t, op1: svint32_t, op2: i32) -> svint32_t { + sveorbt_s32(odd, op1, svdup_n_s32(op2)) +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_s64(odd: svint64_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorbt.nxv2i64")] + fn _sveorbt_s64(odd: svint64_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _sveorbt_s64(odd, op1, op2) } +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_n_s64(odd: svint64_t, op1: svint64_t, op2: i64) -> svint64_t { + sveorbt_s64(odd, op1, svdup_n_s64(op2)) +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_u8(odd: svuint8_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { sveorbt_s8(odd.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_n_u8(odd: svuint8_t, op1: svuint8_t, op2: u8) -> svuint8_t { + sveorbt_u8(odd, op1, svdup_n_u8(op2)) +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_u16(odd: svuint16_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { sveorbt_s16(odd.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_n_u16(odd: svuint16_t, op1: svuint16_t, op2: u16) -> svuint16_t { + sveorbt_u16(odd, op1, svdup_n_u16(op2)) +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_u32(odd: svuint32_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { sveorbt_s32(odd.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_n_u32(odd: svuint32_t, op1: svuint32_t, op2: u32) -> svuint32_t { + sveorbt_u32(odd, op1, svdup_n_u32(op2)) +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_u64(odd: svuint64_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { sveorbt_s64(odd.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleaving exclusive OR (bottom, top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eorbt))] +pub fn sveorbt_n_u64(odd: svuint64_t, op1: svuint64_t, op2: u64) -> svuint64_t { + sveorbt_u64(odd, op1, svdup_n_u64(op2)) +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_s8(even: svint8_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eortb.nxv16i8")] + fn _sveortb_s8(even: svint8_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _sveortb_s8(even, op1, op2) } +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_n_s8(even: svint8_t, op1: svint8_t, op2: i8) -> svint8_t { + sveortb_s8(even, op1, svdup_n_s8(op2)) +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_s16(even: svint16_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eortb.nxv8i16")] + fn _sveortb_s16(even: svint16_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _sveortb_s16(even, op1, op2) } +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_n_s16(even: svint16_t, op1: svint16_t, op2: i16) -> svint16_t { + sveortb_s16(even, op1, svdup_n_s16(op2)) +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_s32(even: svint32_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eortb.nxv4i32")] + fn _sveortb_s32(even: svint32_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _sveortb_s32(even, op1, op2) } +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_n_s32(even: svint32_t, op1: svint32_t, op2: i32) -> svint32_t { + sveortb_s32(even, op1, svdup_n_s32(op2)) +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_s64(even: svint64_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eortb.nxv2i64")] + fn _sveortb_s64(even: svint64_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _sveortb_s64(even, op1, op2) } +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_n_s64(even: svint64_t, op1: svint64_t, op2: i64) -> svint64_t { + sveortb_s64(even, op1, svdup_n_s64(op2)) +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_u8(even: svuint8_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { sveortb_s8(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_n_u8(even: svuint8_t, op1: svuint8_t, op2: u8) -> svuint8_t { + sveortb_u8(even, op1, svdup_n_u8(op2)) +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_u16(even: svuint16_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe { sveortb_s16(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_n_u16(even: svuint16_t, op1: svuint16_t, op2: u16) -> svuint16_t { + sveortb_u16(even, op1, svdup_n_u16(op2)) +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_u32(even: svuint32_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { sveortb_s32(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_n_u32(even: svuint32_t, op1: svuint32_t, op2: u32) -> svuint32_t { + sveortb_u32(even, op1, svdup_n_u32(op2)) +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_u64(even: svuint64_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { sveortb_s64(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Interleaving exclusive OR (top, bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(eortb))] +pub fn sveortb_n_u64(even: svuint64_t, op1: svuint64_t, op2: u64) -> svuint64_t { + sveortb_u64(even, op1, svdup_n_u64(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shadd.nxv16i8")] + fn _svhadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svhadd_s8_m(pg, op1, op2) } +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svhadd_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svhadd_s8_m(pg, op1, op2) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svhadd_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svhadd_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svhadd_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shadd.nxv8i16")] + fn _svhadd_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svhadd_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svhadd_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svhadd_s16_m(pg, op1, op2) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svhadd_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svhadd_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svhadd_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shadd.nxv4i32")] + fn _svhadd_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svhadd_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svhadd_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svhadd_s32_m(pg, op1, op2) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svhadd_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svhadd_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svhadd_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shadd.nxv2i64")] + fn _svhadd_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svhadd_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svhadd_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svhadd_s64_m(pg, op1, op2) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svhadd_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svhadd_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shadd))] +pub fn svhadd_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svhadd_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhadd.nxv16i8")] + fn _svhadd_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svhadd_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svhadd_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svhadd_u8_m(pg, op1, op2) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svhadd_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svhadd_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svhadd_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhadd.nxv8i16")] + fn _svhadd_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svhadd_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svhadd_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svhadd_u16_m(pg, op1, op2) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svhadd_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svhadd_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svhadd_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhadd.nxv4i32")] + fn _svhadd_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svhadd_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svhadd_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svhadd_u32_m(pg, op1, op2) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svhadd_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svhadd_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svhadd_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhadd.nxv2i64")] + fn _svhadd_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svhadd_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svhadd_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svhadd_u64_m(pg, op1, op2) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svhadd_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svhadd_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhadd))] +pub fn svhadd_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svhadd_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Count matching elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhistcnt[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(histcnt))] +pub fn svhistcnt_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.histcnt.nxv4i32" + )] + fn _svhistcnt_s32_z(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svhistcnt_s32_z(pg.sve_into(), op1, op2).as_unsigned() } +} +#[doc = "Count matching elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhistcnt[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(histcnt))] +pub fn svhistcnt_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.histcnt.nxv2i64" + )] + fn _svhistcnt_s64_z(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svhistcnt_s64_z(pg.sve_into(), op1, op2).as_unsigned() } +} +#[doc = "Count matching elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhistcnt[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(histcnt))] +pub fn svhistcnt_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe { svhistcnt_s32_z(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Count matching elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhistcnt[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(histcnt))] +pub fn svhistcnt_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svhistcnt_s64_z(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Count matching elements in 128-bit segments"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhistseg[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(histseg))] +pub fn svhistseg_s8(op1: svint8_t, op2: svint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.histseg.nxv16i8" + )] + fn _svhistseg_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svhistseg_s8(op1, op2).as_unsigned() } +} +#[doc = "Count matching elements in 128-bit segments"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhistseg[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(histseg))] +pub fn svhistseg_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe { svhistseg_s8(op1.as_signed(), op2.as_signed()) } +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shsub.nxv16i8")] + fn _svhsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svhsub_s8_m(pg, op1, op2) } +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svhsub_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svhsub_s8_m(pg, op1, op2) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svhsub_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svhsub_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svhsub_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shsub.nxv8i16")] + fn _svhsub_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svhsub_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svhsub_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svhsub_s16_m(pg, op1, op2) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svhsub_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svhsub_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svhsub_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shsub.nxv4i32")] + fn _svhsub_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svhsub_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svhsub_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svhsub_s32_m(pg, op1, op2) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svhsub_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svhsub_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svhsub_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shsub.nxv2i64")] + fn _svhsub_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svhsub_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svhsub_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svhsub_s64_m(pg, op1, op2) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svhsub_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svhsub_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsub_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svhsub_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhsub.nxv16i8")] + fn _svhsub_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svhsub_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svhsub_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svhsub_u8_m(pg, op1, op2) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svhsub_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svhsub_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svhsub_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhsub.nxv8i16")] + fn _svhsub_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svhsub_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svhsub_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svhsub_u16_m(pg, op1, op2) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svhsub_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svhsub_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svhsub_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhsub.nxv4i32")] + fn _svhsub_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svhsub_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svhsub_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svhsub_u32_m(pg, op1, op2) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svhsub_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svhsub_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svhsub_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhsub.nxv2i64")] + fn _svhsub_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svhsub_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svhsub_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svhsub_u64_m(pg, op1, op2) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svhsub_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svhsub_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Halving subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsub_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svhsub_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shsubr.nxv16i8")] + fn _svhsubr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svhsubr_s8_m(pg, op1, op2) } +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svhsubr_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svhsubr_s8_m(pg, op1, op2) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svhsubr_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svhsubr_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svhsubr_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shsubr.nxv8i16")] + fn _svhsubr_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svhsubr_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svhsubr_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svhsubr_s16_m(pg, op1, op2) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svhsubr_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svhsubr_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svhsubr_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shsubr.nxv4i32")] + fn _svhsubr_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svhsubr_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svhsubr_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svhsubr_s32_m(pg, op1, op2) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svhsubr_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svhsubr_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svhsubr_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shsubr.nxv2i64")] + fn _svhsubr_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svhsubr_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svhsubr_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svhsubr_s64_m(pg, op1, op2) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svhsubr_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svhsubr_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shsub))] +pub fn svhsubr_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svhsubr_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhsubr.nxv16i8")] + fn _svhsubr_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svhsubr_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svhsubr_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svhsubr_u8_m(pg, op1, op2) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svhsubr_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svhsubr_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svhsubr_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhsubr.nxv8i16")] + fn _svhsubr_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svhsubr_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svhsubr_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svhsubr_u16_m(pg, op1, op2) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svhsubr_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svhsubr_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svhsubr_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhsubr.nxv4i32")] + fn _svhsubr_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svhsubr_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svhsubr_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svhsubr_u32_m(pg, op1, op2) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svhsubr_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svhsubr_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svhsubr_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhsubr.nxv2i64")] + fn _svhsubr_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svhsubr_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svhsubr_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svhsubr_u64_m(pg, op1, op2) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svhsubr_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svhsubr_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Halving subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uhsub))] +pub fn svhsubr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svhsubr_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[s64]index[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_s64index_f64( + pg: svbool_t, + base: *const f64, + indices: svint64_t, +) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.index.nxv2f64" + )] + fn _svldnt1_gather_s64index_f64( + pg: svbool2_t, + base: *const f64, + indices: svint64_t, + ) -> svfloat64_t; + } + _svldnt1_gather_s64index_f64(pg.sve_into(), base, indices) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[s64]index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_s64index_s64( + pg: svbool_t, + base: *const i64, + indices: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.index.nxv2i64" + )] + fn _svldnt1_gather_s64index_s64( + pg: svbool2_t, + base: *const i64, + indices: svint64_t, + ) -> svint64_t; + } + _svldnt1_gather_s64index_s64(pg.sve_into(), base, indices) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[s64]index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_s64index_u64( + pg: svbool_t, + base: *const u64, + indices: svint64_t, +) -> svuint64_t { + svldnt1_gather_s64index_s64(pg, base.as_signed(), indices).as_unsigned() +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u64]index[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64index_f64( + pg: svbool_t, + base: *const f64, + indices: svuint64_t, +) -> svfloat64_t { + svldnt1_gather_s64index_f64(pg, base, indices.as_signed()) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u64]index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64index_s64( + pg: svbool_t, + base: *const i64, + indices: svuint64_t, +) -> svint64_t { + svldnt1_gather_s64index_s64(pg, base, indices.as_signed()) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u64]index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64index_u64( + pg: svbool_t, + base: *const u64, + indices: svuint64_t, +) -> svuint64_t { + svldnt1_gather_s64index_s64(pg, base.as_signed(), indices.as_signed()).as_unsigned() +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[s64]offset[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_s64offset_f64( + pg: svbool_t, + base: *const f64, + offsets: svint64_t, +) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.nxv2f64" + )] + fn _svldnt1_gather_s64offset_f64( + pg: svbool2_t, + base: *const f64, + offsets: svint64_t, + ) -> svfloat64_t; + } + _svldnt1_gather_s64offset_f64(pg.sve_into(), base, offsets) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[s64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_s64offset_s64( + pg: svbool_t, + base: *const i64, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.nxv2i64" + )] + fn _svldnt1_gather_s64offset_s64( + pg: svbool2_t, + base: *const i64, + offsets: svint64_t, + ) -> svint64_t; + } + _svldnt1_gather_s64offset_s64(pg.sve_into(), base, offsets) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[s64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_s64offset_u64( + pg: svbool_t, + base: *const u64, + offsets: svint64_t, +) -> svuint64_t { + svldnt1_gather_s64offset_s64(pg, base.as_signed(), offsets).as_unsigned() +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u32]offset[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32offset_f32( + pg: svbool_t, + base: *const f32, + offsets: svuint32_t, +) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4f32" + )] + fn _svldnt1_gather_u32offset_f32( + pg: svbool4_t, + base: *const f32, + offsets: svint32_t, + ) -> svfloat32_t; + } + _svldnt1_gather_u32offset_f32(pg.sve_into(), base, offsets.as_signed()) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u32]offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32offset_s32( + pg: svbool_t, + base: *const i32, + offsets: svuint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i32" + )] + fn _svldnt1_gather_u32offset_s32( + pg: svbool4_t, + base: *const i32, + offsets: svint32_t, + ) -> svint32_t; + } + _svldnt1_gather_u32offset_s32(pg.sve_into(), base, offsets.as_signed()) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u32]offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32offset_u32( + pg: svbool_t, + base: *const u32, + offsets: svuint32_t, +) -> svuint32_t { + svldnt1_gather_u32offset_s32(pg, base.as_signed(), offsets).as_unsigned() +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u64]offset[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64offset_f64( + pg: svbool_t, + base: *const f64, + offsets: svuint64_t, +) -> svfloat64_t { + svldnt1_gather_s64offset_f64(pg, base, offsets.as_signed()) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64offset_s64( + pg: svbool_t, + base: *const i64, + offsets: svuint64_t, +) -> svint64_t { + svldnt1_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64offset_u64( + pg: svbool_t, + base: *const u64, + offsets: svuint64_t, +) -> svuint64_t { + svldnt1_gather_s64offset_s64(pg, base.as_signed(), offsets.as_signed()).as_unsigned() +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_f32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32base_f32(pg: svbool_t, bases: svuint32_t) -> svfloat32_t { + svldnt1_gather_u32base_offset_f32(pg, bases, 0) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svldnt1_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svldnt1_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_f64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64base_f64(pg: svbool_t, bases: svuint64_t) -> svfloat64_t { + svldnt1_gather_u64base_offset_f64(pg, bases, 0) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldnt1_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldnt1_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_index_f32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32base_index_f32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svfloat32_t { + svldnt1_gather_u32base_offset_f32(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_index_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svint32_t { + svldnt1_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_index_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svuint32_t { + svldnt1_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_index_f64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64base_index_f64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svfloat64_t { + svldnt1_gather_u64base_offset_f64(pg, bases, index.unchecked_shl(3)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svldnt1_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(3)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svldnt1_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(3)) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_offset_f32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32base_offset_f32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4f32.nxv4i32" + )] + fn _svldnt1_gather_u32base_offset_f32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> svfloat32_t; + } + _svldnt1_gather_u32base_offset_f32(pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i32.nxv4i32" + )] + fn _svldnt1_gather_u32base_offset_s32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> svint32_t; + } + _svldnt1_gather_u32base_offset_s32(pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + svldnt1_gather_u32base_offset_s32(pg, bases, offset).as_unsigned() +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_offset_f64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64base_offset_f64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2f64.nxv2i64" + )] + fn _svldnt1_gather_u64base_offset_f64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> svfloat64_t; + } + _svldnt1_gather_u64base_offset_f64(pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i64.nxv2i64" + )] + fn _svldnt1_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> svint64_t; + } + _svldnt1_gather_u64base_offset_s64(pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Unextended load, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1d))] +pub unsafe fn svldnt1_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svldnt1_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather_[s64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_s64offset_s64( + pg: svbool_t, + base: *const i8, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.nxv2i8" + )] + fn _svldnt1sb_gather_s64offset_s64( + pg: svbool2_t, + base: *const i8, + offsets: svint64_t, + ) -> nxv2i8; + } + crate::intrinsics::simd::simd_cast(_svldnt1sb_gather_s64offset_s64( + pg.sve_into(), + base, + offsets, + )) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[s64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_s64offset_s64( + pg: svbool_t, + base: *const i16, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.nxv2i16" + )] + fn _svldnt1sh_gather_s64offset_s64( + pg: svbool2_t, + base: *const i16, + offsets: svint64_t, + ) -> nxv2i16; + } + crate::intrinsics::simd::simd_cast(_svldnt1sh_gather_s64offset_s64( + pg.sve_into(), + base, + offsets, + )) +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[s64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_s64offset_s64( + pg: svbool_t, + base: *const i32, + offsets: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.nxv2i32" + )] + fn _svldnt1sw_gather_s64offset_s64( + pg: svbool2_t, + base: *const i32, + offsets: svint64_t, + ) -> nxv2i32; + } + crate::intrinsics::simd::simd_cast(_svldnt1sw_gather_s64offset_s64( + pg.sve_into(), + base, + offsets, + )) +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather_[s64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_s64offset_u64( + pg: svbool_t, + base: *const i8, + offsets: svint64_t, +) -> svuint64_t { + svldnt1sb_gather_s64offset_s64(pg, base, offsets).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[s64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_s64offset_u64( + pg: svbool_t, + base: *const i16, + offsets: svint64_t, +) -> svuint64_t { + svldnt1sh_gather_s64offset_s64(pg, base, offsets).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[s64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_s64offset_u64( + pg: svbool_t, + base: *const i32, + offsets: svint64_t, +) -> svuint64_t { + svldnt1sw_gather_s64offset_s64(pg, base, offsets).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather_[u32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u32offset_s32( + pg: svbool_t, + base: *const i8, + offsets: svuint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i8" + )] + fn _svldnt1sb_gather_u32offset_s32( + pg: svbool4_t, + base: *const i8, + offsets: svint32_t, + ) -> nxv4i8; + } + crate::intrinsics::simd::simd_cast(_svldnt1sb_gather_u32offset_s32( + pg.sve_into(), + base, + offsets.as_signed(), + )) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[u32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u32offset_s32( + pg: svbool_t, + base: *const i16, + offsets: svuint32_t, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i16" + )] + fn _svldnt1sh_gather_u32offset_s32( + pg: svbool4_t, + base: *const i16, + offsets: svint32_t, + ) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast(_svldnt1sh_gather_u32offset_s32( + pg.sve_into(), + base, + offsets.as_signed(), + )) +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather_[u32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u32offset_u32( + pg: svbool_t, + base: *const i8, + offsets: svuint32_t, +) -> svuint32_t { + svldnt1sb_gather_u32offset_s32(pg, base, offsets).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[u32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u32offset_u32( + pg: svbool_t, + base: *const i16, + offsets: svuint32_t, +) -> svuint32_t { + svldnt1sh_gather_u32offset_s32(pg, base, offsets).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather_[u64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u64offset_s64( + pg: svbool_t, + base: *const i8, + offsets: svuint64_t, +) -> svint64_t { + svldnt1sb_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[u64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u64offset_s64( + pg: svbool_t, + base: *const i16, + offsets: svuint64_t, +) -> svint64_t { + svldnt1sh_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[u64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_u64offset_s64( + pg: svbool_t, + base: *const i32, + offsets: svuint64_t, +) -> svint64_t { + svldnt1sw_gather_s64offset_s64(pg, base, offsets.as_signed()) +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather_[u64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u64offset_u64( + pg: svbool_t, + base: *const i8, + offsets: svuint64_t, +) -> svuint64_t { + svldnt1sb_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[u64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u64offset_u64( + pg: svbool_t, + base: *const i16, + offsets: svuint64_t, +) -> svuint64_t { + svldnt1sh_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[u64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_u64offset_u64( + pg: svbool_t, + base: *const i32, + offsets: svuint64_t, +) -> svuint64_t { + svldnt1sw_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u32base]_offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i8.nxv4i32" + )] + fn _svldnt1sb_gather_u32base_offset_s32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i8; + } + crate::intrinsics::simd::simd_cast(_svldnt1sb_gather_u32base_offset_s32( + pg.sve_into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u32base]_offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i16.nxv4i32" + )] + fn _svldnt1sh_gather_u32base_offset_s32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast(_svldnt1sh_gather_u32base_offset_s32( + pg.sve_into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u32base]_offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + svldnt1sb_gather_u32base_offset_s32(pg, bases, offset).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u32base]_offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + svldnt1sh_gather_u32base_offset_s32(pg, bases, offset).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i8.nxv2i64" + )] + fn _svldnt1sb_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i8; + } + crate::intrinsics::simd::simd_cast(_svldnt1sb_gather_u64base_offset_s64( + pg.sve_into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i16.nxv2i64" + )] + fn _svldnt1sh_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i16; + } + crate::intrinsics::simd::simd_cast(_svldnt1sh_gather_u64base_offset_s64( + pg.sve_into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i32.nxv2i64" + )] + fn _svldnt1sw_gather_u64base_offset_s64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i32; + } + crate::intrinsics::simd::simd_cast(_svldnt1sw_gather_u64base_offset_s64( + pg.sve_into(), + bases.as_signed(), + offset, + )) +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svldnt1sb_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svldnt1sh_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + svldnt1sw_gather_u64base_offset_s64(pg, bases, offset).as_unsigned() +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u32base]_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svldnt1sb_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u32base]_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svldnt1sh_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u32base]_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svldnt1sb_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u32base]_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svldnt1sh_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldnt1sb_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldnt1sh_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldnt1sw_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 8-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sb))] +pub unsafe fn svldnt1sb_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldnt1sb_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldnt1sh_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldnt1sw_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[s64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_s64index_s64( + pg: svbool_t, + base: *const i16, + indices: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.index.nxv2i16" + )] + fn _svldnt1sh_gather_s64index_s64( + pg: svbool2_t, + base: *const i16, + indices: svint64_t, + ) -> nxv2i16; + } + crate::intrinsics::simd::simd_cast(_svldnt1sh_gather_s64index_s64(pg.sve_into(), base, indices)) +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[s64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_s64index_s64( + pg: svbool_t, + base: *const i32, + indices: svint64_t, +) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.index.nxv2i32" + )] + fn _svldnt1sw_gather_s64index_s64( + pg: svbool2_t, + base: *const i32, + indices: svint64_t, + ) -> nxv2i32; + } + crate::intrinsics::simd::simd_cast(_svldnt1sw_gather_s64index_s64(pg.sve_into(), base, indices)) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[s64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_s64index_u64( + pg: svbool_t, + base: *const i16, + indices: svint64_t, +) -> svuint64_t { + svldnt1sh_gather_s64index_s64(pg, base, indices).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[s64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_s64index_u64( + pg: svbool_t, + base: *const i32, + indices: svint64_t, +) -> svuint64_t { + svldnt1sw_gather_s64index_s64(pg, base, indices).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[u64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u64index_s64( + pg: svbool_t, + base: *const i16, + indices: svuint64_t, +) -> svint64_t { + svldnt1sh_gather_s64index_s64(pg, base, indices.as_signed()) +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[u64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_u64index_s64( + pg: svbool_t, + base: *const i32, + indices: svuint64_t, +) -> svint64_t { + svldnt1sw_gather_s64index_s64(pg, base, indices.as_signed()) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[u64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u64index_u64( + pg: svbool_t, + base: *const i16, + indices: svuint64_t, +) -> svuint64_t { + svldnt1sh_gather_s64index_s64(pg, base, indices.as_signed()).as_unsigned() +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[u64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_u64index_u64( + pg: svbool_t, + base: *const i32, + indices: svuint64_t, +) -> svuint64_t { + svldnt1sw_gather_s64index_s64(pg, base, indices.as_signed()).as_unsigned() +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u32base]_index_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svint32_t { + svldnt1sh_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u32base]_index_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svuint32_t { + svldnt1sh_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u64base]_index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svldnt1sh_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather[_u64base]_index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svldnt1sw_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Load 16-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u64base]_index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sh))] +pub unsafe fn svldnt1sh_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svldnt1sh_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and sign-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather[_u64base]_index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1sw))] +pub unsafe fn svldnt1sw_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svldnt1sw_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather_[s64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_s64offset_s64( + pg: svbool_t, + base: *const u8, + offsets: svint64_t, +) -> svint64_t { + svldnt1ub_gather_s64offset_u64(pg, base, offsets).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[s64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_s64offset_s64( + pg: svbool_t, + base: *const u16, + offsets: svint64_t, +) -> svint64_t { + svldnt1uh_gather_s64offset_u64(pg, base, offsets).as_signed() +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[s64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_s64offset_s64( + pg: svbool_t, + base: *const u32, + offsets: svint64_t, +) -> svint64_t { + svldnt1uw_gather_s64offset_u64(pg, base, offsets).as_signed() +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather_[s64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_s64offset_u64( + pg: svbool_t, + base: *const u8, + offsets: svint64_t, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.nxv2i8" + )] + fn _svldnt1ub_gather_s64offset_u64( + pg: svbool2_t, + base: *const i8, + offsets: svint64_t, + ) -> nxv2i8; + } + crate::intrinsics::simd::simd_cast::( + _svldnt1ub_gather_s64offset_u64(pg.sve_into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[s64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_s64offset_u64( + pg: svbool_t, + base: *const u16, + offsets: svint64_t, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.nxv2i16" + )] + fn _svldnt1uh_gather_s64offset_u64( + pg: svbool2_t, + base: *const i16, + offsets: svint64_t, + ) -> nxv2i16; + } + crate::intrinsics::simd::simd_cast::( + _svldnt1uh_gather_s64offset_u64(pg.sve_into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[s64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_s64offset_u64( + pg: svbool_t, + base: *const u32, + offsets: svint64_t, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.nxv2i32" + )] + fn _svldnt1uw_gather_s64offset_u64( + pg: svbool2_t, + base: *const i32, + offsets: svint64_t, + ) -> nxv2i32; + } + crate::intrinsics::simd::simd_cast::( + _svldnt1uw_gather_s64offset_u64(pg.sve_into(), base.as_signed(), offsets).as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather_[u32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u32offset_s32( + pg: svbool_t, + base: *const u8, + offsets: svuint32_t, +) -> svint32_t { + svldnt1ub_gather_u32offset_u32(pg, base, offsets).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[u32]offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u32offset_s32( + pg: svbool_t, + base: *const u16, + offsets: svuint32_t, +) -> svint32_t { + svldnt1uh_gather_u32offset_u32(pg, base, offsets).as_signed() +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather_[u32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u32offset_u32( + pg: svbool_t, + base: *const u8, + offsets: svuint32_t, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i8" + )] + fn _svldnt1ub_gather_u32offset_u32( + pg: svbool4_t, + base: *const i8, + offsets: svint32_t, + ) -> nxv4i8; + } + crate::intrinsics::simd::simd_cast::( + _svldnt1ub_gather_u32offset_u32(pg.sve_into(), base.as_signed(), offsets.as_signed()) + .as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[u32]offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u32offset_u32( + pg: svbool_t, + base: *const u16, + offsets: svuint32_t, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i16" + )] + fn _svldnt1uh_gather_u32offset_u32( + pg: svbool4_t, + base: *const i16, + offsets: svint32_t, + ) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast::( + _svldnt1uh_gather_u32offset_u32(pg.sve_into(), base.as_signed(), offsets.as_signed()) + .as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather_[u64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u64offset_s64( + pg: svbool_t, + base: *const u8, + offsets: svuint64_t, +) -> svint64_t { + svldnt1ub_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[u64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u64offset_s64( + pg: svbool_t, + base: *const u16, + offsets: svuint64_t, +) -> svint64_t { + svldnt1uh_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed() +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[u64]offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_u64offset_s64( + pg: svbool_t, + base: *const u32, + offsets: svuint64_t, +) -> svint64_t { + svldnt1uw_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed() +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather_[u64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u64offset_u64( + pg: svbool_t, + base: *const u8, + offsets: svuint64_t, +) -> svuint64_t { + svldnt1ub_gather_s64offset_u64(pg, base, offsets.as_signed()) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[u64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u64offset_u64( + pg: svbool_t, + base: *const u16, + offsets: svuint64_t, +) -> svuint64_t { + svldnt1uh_gather_s64offset_u64(pg, base, offsets.as_signed()) +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[u64]offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_u64offset_u64( + pg: svbool_t, + base: *const u32, + offsets: svuint64_t, +) -> svuint64_t { + svldnt1uw_gather_s64offset_u64(pg, base, offsets.as_signed()) +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u32base]_offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + svldnt1ub_gather_u32base_offset_u32(pg, bases, offset).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u32base]_offset_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svint32_t { + svldnt1uh_gather_u32base_offset_u32(pg, bases, offset).as_signed() +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u32base]_offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i8.nxv4i32" + )] + fn _svldnt1ub_gather_u32base_offset_u32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i8; + } + crate::intrinsics::simd::simd_cast::( + _svldnt1ub_gather_u32base_offset_u32(pg.sve_into(), bases.as_signed(), offset) + .as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u32base]_offset_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, +) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i16.nxv4i32" + )] + fn _svldnt1uh_gather_u32base_offset_u32( + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ) -> nxv4i16; + } + crate::intrinsics::simd::simd_cast::( + _svldnt1uh_gather_u32base_offset_u32(pg.sve_into(), bases.as_signed(), offset) + .as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + svldnt1ub_gather_u64base_offset_u64(pg, bases, offset).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + svldnt1uh_gather_u64base_offset_u64(pg, bases, offset).as_signed() +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather[_u64base]_offset_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svint64_t { + svldnt1uw_gather_u64base_offset_u64(pg, bases, offset).as_signed() +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i8.nxv2i64" + )] + fn _svldnt1ub_gather_u64base_offset_u64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i8; + } + crate::intrinsics::simd::simd_cast::( + _svldnt1ub_gather_u64base_offset_u64(pg.sve_into(), bases.as_signed(), offset) + .as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i16.nxv2i64" + )] + fn _svldnt1uh_gather_u64base_offset_u64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i16; + } + crate::intrinsics::simd::simd_cast::( + _svldnt1uh_gather_u64base_offset_u64(pg.sve_into(), bases.as_signed(), offset) + .as_unsigned(), + ) +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather[_u64base]_offset_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i32.nxv2i64" + )] + fn _svldnt1uw_gather_u64base_offset_u64( + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ) -> nxv2i32; + } + crate::intrinsics::simd::simd_cast::( + _svldnt1uw_gather_u64base_offset_u64(pg.sve_into(), bases.as_signed(), offset) + .as_unsigned(), + ) +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u32base]_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svldnt1ub_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u32base]_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t { + svldnt1uh_gather_u32base_offset_s32(pg, bases, 0) +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u32base]_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svldnt1ub_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u32base]_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t { + svldnt1uh_gather_u32base_offset_u32(pg, bases, 0) +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldnt1ub_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldnt1uh_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather[_u64base]_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t { + svldnt1uw_gather_u64base_offset_s64(pg, bases, 0) +} +#[doc = "Load 8-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1b))] +pub unsafe fn svldnt1ub_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldnt1ub_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldnt1uh_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather[_u64base]_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t { + svldnt1uw_gather_u64base_offset_u64(pg, bases, 0) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[s64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_s64index_s64( + pg: svbool_t, + base: *const u16, + indices: svint64_t, +) -> svint64_t { + svldnt1uh_gather_s64index_u64(pg, base, indices).as_signed() +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[s64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_s64index_s64( + pg: svbool_t, + base: *const u32, + indices: svint64_t, +) -> svint64_t { + svldnt1uw_gather_s64index_u64(pg, base, indices).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[s64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_s64index_u64( + pg: svbool_t, + base: *const u16, + indices: svint64_t, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.index.nxv2i16" + )] + fn _svldnt1uh_gather_s64index_u64( + pg: svbool2_t, + base: *const i16, + indices: svint64_t, + ) -> nxv2i16; + } + crate::intrinsics::simd::simd_cast::( + _svldnt1uh_gather_s64index_u64(pg.sve_into(), base.as_signed(), indices).as_unsigned(), + ) +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[s64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_s64index_u64( + pg: svbool_t, + base: *const u32, + indices: svint64_t, +) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ldnt1.gather.index.nxv2i32" + )] + fn _svldnt1uw_gather_s64index_u64( + pg: svbool2_t, + base: *const i32, + indices: svint64_t, + ) -> nxv2i32; + } + crate::intrinsics::simd::simd_cast::( + _svldnt1uw_gather_s64index_u64(pg.sve_into(), base.as_signed(), indices).as_unsigned(), + ) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[u64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u64index_s64( + pg: svbool_t, + base: *const u16, + indices: svuint64_t, +) -> svint64_t { + svldnt1uh_gather_s64index_u64(pg, base, indices.as_signed()).as_signed() +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[u64]index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_u64index_s64( + pg: svbool_t, + base: *const u32, + indices: svuint64_t, +) -> svint64_t { + svldnt1uw_gather_s64index_u64(pg, base, indices.as_signed()).as_signed() +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[u64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u64index_u64( + pg: svbool_t, + base: *const u16, + indices: svuint64_t, +) -> svuint64_t { + svldnt1uh_gather_s64index_u64(pg, base, indices.as_signed()) +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[u64]index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_u64index_u64( + pg: svbool_t, + base: *const u32, + indices: svuint64_t, +) -> svuint64_t { + svldnt1uw_gather_s64index_u64(pg, base, indices.as_signed()) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u32base]_index_s32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svint32_t { + svldnt1uh_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u32base]_index_u32)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, +) -> svuint32_t { + svldnt1uh_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u64base]_index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svldnt1uh_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather[_u64base]_index_s64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svint64_t { + svldnt1uw_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Load 16-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u64base]_index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1h))] +pub unsafe fn svldnt1uh_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svldnt1uh_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(1)) +} +#[doc = "Load 32-bit data and zero-extend, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather[_u64base]_index_u64)"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ldnt1w))] +pub unsafe fn svldnt1uw_gather_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, +) -> svuint64_t { + svldnt1uw_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(2)) +} +#[doc = "Base 2 logarithm as integer"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlogb[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(flogb))] +pub fn svlogb_f32_m(inactive: svint32_t, pg: svbool_t, op: svfloat32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.flogb.nxv4f32")] + fn _svlogb_f32_m(inactive: svint32_t, pg: svbool4_t, op: svfloat32_t) -> svint32_t; + } + unsafe { _svlogb_f32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Base 2 logarithm as integer"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlogb[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(flogb))] +pub fn svlogb_f32_x(pg: svbool_t, op: svfloat32_t) -> svint32_t { + unsafe { svlogb_f32_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Base 2 logarithm as integer"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlogb[_f32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(flogb))] +pub fn svlogb_f32_z(pg: svbool_t, op: svfloat32_t) -> svint32_t { + svlogb_f32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Base 2 logarithm as integer"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlogb[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(flogb))] +pub fn svlogb_f64_m(inactive: svint64_t, pg: svbool_t, op: svfloat64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.flogb.nxv2f64")] + fn _svlogb_f64_m(inactive: svint64_t, pg: svbool2_t, op: svfloat64_t) -> svint64_t; + } + unsafe { _svlogb_f64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Base 2 logarithm as integer"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlogb[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(flogb))] +pub fn svlogb_f64_x(pg: svbool_t, op: svfloat64_t) -> svint64_t { + unsafe { svlogb_f64_m(transmute_unchecked(op), pg, op) } +} +#[doc = "Base 2 logarithm as integer"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlogb[_f64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(flogb))] +pub fn svlogb_f64_z(pg: svbool_t, op: svfloat64_t) -> svint64_t { + svlogb_f64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Detect any matching elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmatch[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(match))] +pub fn svmatch_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.match.nxv16i8")] + fn _svmatch_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t; + } + unsafe { _svmatch_s8(pg, op1, op2) } +} +#[doc = "Detect any matching elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmatch[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(match))] +pub fn svmatch_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.match.nxv8i16")] + fn _svmatch_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t; + } + unsafe { _svmatch_s16(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Detect any matching elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmatch[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(match))] +pub fn svmatch_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { + unsafe { svmatch_s8(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Detect any matching elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmatch[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(match))] +pub fn svmatch_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { + unsafe { svmatch_s16(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Maximum number pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnmp[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxnmp))] +pub fn svmaxnmp_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fmaxnmp.nxv4f32" + )] + fn _svmaxnmp_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svmaxnmp_f32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Maximum number pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnmp[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxnmp))] +pub fn svmaxnmp_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmaxnmp_f32_m(pg, op1, op2) +} +#[doc = "Maximum number pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnmp[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxnmp))] +pub fn svmaxnmp_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fmaxnmp.nxv2f64" + )] + fn _svmaxnmp_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svmaxnmp_f64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Maximum number pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnmp[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxnmp))] +pub fn svmaxnmp_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmaxnmp_f64_m(pg, op1, op2) +} +#[doc = "Maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxp))] +pub fn svmaxp_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmaxp.nxv4f32")] + fn _svmaxp_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svmaxp_f32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxp))] +pub fn svmaxp_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svmaxp_f32_m(pg, op1, op2) +} +#[doc = "Maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxp))] +pub fn svmaxp_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmaxp.nxv2f64")] + fn _svmaxp_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svmaxp_f64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmaxp))] +pub fn svmaxp_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svmaxp_f64_m(pg, op1, op2) +} +#[doc = "Maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smaxp))] +pub fn svmaxp_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxp.nxv16i8")] + fn _svmaxp_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svmaxp_s8_m(pg, op1, op2) } +} +#[doc = "Maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smaxp))] +pub fn svmaxp_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svmaxp_s8_m(pg, op1, op2) +} +#[doc = "Maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smaxp))] +pub fn svmaxp_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxp.nxv8i16")] + fn _svmaxp_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svmaxp_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smaxp))] +pub fn svmaxp_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svmaxp_s16_m(pg, op1, op2) +} +#[doc = "Maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smaxp))] +pub fn svmaxp_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxp.nxv4i32")] + fn _svmaxp_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svmaxp_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smaxp))] +pub fn svmaxp_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svmaxp_s32_m(pg, op1, op2) +} +#[doc = "Maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smaxp))] +pub fn svmaxp_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxp.nxv2i64")] + fn _svmaxp_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svmaxp_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smaxp))] +pub fn svmaxp_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svmaxp_s64_m(pg, op1, op2) +} +#[doc = "Maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umaxp))] +pub fn svmaxp_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxp.nxv16i8")] + fn _svmaxp_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svmaxp_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umaxp))] +pub fn svmaxp_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svmaxp_u8_m(pg, op1, op2) +} +#[doc = "Maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umaxp))] +pub fn svmaxp_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxp.nxv8i16")] + fn _svmaxp_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svmaxp_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umaxp))] +pub fn svmaxp_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svmaxp_u16_m(pg, op1, op2) +} +#[doc = "Maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umaxp))] +pub fn svmaxp_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxp.nxv4i32")] + fn _svmaxp_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svmaxp_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umaxp))] +pub fn svmaxp_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svmaxp_u32_m(pg, op1, op2) +} +#[doc = "Maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umaxp))] +pub fn svmaxp_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxp.nxv2i64")] + fn _svmaxp_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svmaxp_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Maximum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umaxp))] +pub fn svmaxp_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svmaxp_u64_m(pg, op1, op2) +} +#[doc = "Minimum number pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnmp[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminnmp))] +pub fn svminnmp_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fminnmp.nxv4f32" + )] + fn _svminnmp_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svminnmp_f32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Minimum number pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnmp[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminnmp))] +pub fn svminnmp_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svminnmp_f32_m(pg, op1, op2) +} +#[doc = "Minimum number pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnmp[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminnmp))] +pub fn svminnmp_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fminnmp.nxv2f64" + )] + fn _svminnmp_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svminnmp_f64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Minimum number pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnmp[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminnmp))] +pub fn svminnmp_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svminnmp_f64_m(pg, op1, op2) +} +#[doc = "Minimum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_f32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminp))] +pub fn svminp_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fminp.nxv4f32")] + fn _svminp_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t; + } + unsafe { _svminp_f32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Minimum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_f32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminp))] +pub fn svminp_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + svminp_f32_m(pg, op1, op2) +} +#[doc = "Minimum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_f64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminp))] +pub fn svminp_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fminp.nxv2f64")] + fn _svminp_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t; + } + unsafe { _svminp_f64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Minimum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_f64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fminp))] +pub fn svminp_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + svminp_f64_m(pg, op1, op2) +} +#[doc = "Minimum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sminp))] +pub fn svminp_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminp.nxv16i8")] + fn _svminp_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svminp_s8_m(pg, op1, op2) } +} +#[doc = "Minimum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sminp))] +pub fn svminp_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svminp_s8_m(pg, op1, op2) +} +#[doc = "Minimum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sminp))] +pub fn svminp_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminp.nxv8i16")] + fn _svminp_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svminp_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Minimum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sminp))] +pub fn svminp_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svminp_s16_m(pg, op1, op2) +} +#[doc = "Minimum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sminp))] +pub fn svminp_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminp.nxv4i32")] + fn _svminp_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svminp_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Minimum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sminp))] +pub fn svminp_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svminp_s32_m(pg, op1, op2) +} +#[doc = "Minimum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sminp))] +pub fn svminp_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminp.nxv2i64")] + fn _svminp_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svminp_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Minimum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sminp))] +pub fn svminp_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svminp_s64_m(pg, op1, op2) +} +#[doc = "Minimum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uminp))] +pub fn svminp_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminp.nxv16i8")] + fn _svminp_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svminp_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Minimum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uminp))] +pub fn svminp_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svminp_u8_m(pg, op1, op2) +} +#[doc = "Minimum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uminp))] +pub fn svminp_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminp.nxv8i16")] + fn _svminp_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svminp_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Minimum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uminp))] +pub fn svminp_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svminp_u16_m(pg, op1, op2) +} +#[doc = "Minimum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uminp))] +pub fn svminp_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminp.nxv4i32")] + fn _svminp_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svminp_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Minimum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uminp))] +pub fn svminp_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svminp_u32_m(pg, op1, op2) +} +#[doc = "Minimum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uminp))] +pub fn svminp_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminp.nxv2i64")] + fn _svminp_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svminp_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Minimum pairwise"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uminp))] +pub fn svminp_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svminp_u64_m(pg, op1, op2) +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla, IMM_INDEX = 0))] +pub fn svmla_lane_s16( + op1: svint16_t, + op2: svint16_t, + op3: svint16_t, +) -> svint16_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.mla.lane.nxv8i16" + )] + fn _svmla_lane_s16( + op1: svint16_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint16_t; + } + unsafe { _svmla_lane_s16(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla, IMM_INDEX = 0))] +pub fn svmla_lane_s32( + op1: svint32_t, + op2: svint32_t, + op3: svint32_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.mla.lane.nxv4i32" + )] + fn _svmla_lane_s32( + op1: svint32_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { _svmla_lane_s32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla, IMM_INDEX = 0))] +pub fn svmla_lane_s64( + op1: svint64_t, + op2: svint64_t, + op3: svint64_t, +) -> svint64_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.mla.lane.nxv2i64" + )] + fn _svmla_lane_s64( + op1: svint64_t, + op2: svint64_t, + op3: svint64_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { _svmla_lane_s64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla, IMM_INDEX = 0))] +pub fn svmla_lane_u16( + op1: svuint16_t, + op2: svuint16_t, + op3: svuint16_t, +) -> svuint16_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe { + svmla_lane_s16::(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() + } +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla, IMM_INDEX = 0))] +pub fn svmla_lane_u32( + op1: svuint32_t, + op2: svuint32_t, + op3: svuint32_t, +) -> svuint32_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe { + svmla_lane_s32::(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() + } +} +#[doc = "Multiply-add, addend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mla, IMM_INDEX = 0))] +pub fn svmla_lane_u64( + op1: svuint64_t, + op2: svuint64_t, + op3: svuint64_t, +) -> svuint64_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe { + svmla_lane_s64::(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() + } +} +#[doc = "Multiply-add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlalb, IMM_INDEX = 0))] +pub fn svmlalb_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.smlalb.lane.nxv4i32" + )] + fn _svmlalb_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { _svmlalb_lane_s32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb_lane[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlalb, IMM_INDEX = 0))] +pub fn svmlalb_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, +) -> svint64_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.smlalb.lane.nxv2i64" + )] + fn _svmlalb_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { _svmlalb_lane_s64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb_lane[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlalb, IMM_INDEX = 0))] +pub fn svmlalb_lane_u32( + op1: svuint32_t, + op2: svuint16_t, + op3: svuint16_t, +) -> svuint32_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.umlalb.lane.nxv4i32" + )] + fn _svmlalb_lane_u32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { + _svmlalb_lane_u32(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX) + .as_unsigned() + } +} +#[doc = "Multiply-add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb_lane[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlalb, IMM_INDEX = 0))] +pub fn svmlalb_lane_u64( + op1: svuint64_t, + op2: svuint32_t, + op3: svuint32_t, +) -> svuint64_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.umlalb.lane.nxv2i64" + )] + fn _svmlalb_lane_u64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { + _svmlalb_lane_u64(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX) + .as_unsigned() + } +} +#[doc = "Multiply-add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlalb))] +pub fn svmlalb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlalb.nxv8i16")] + fn _svmlalb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svmlalb_s16(op1, op2, op3) } +} +#[doc = "Multiply-add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlalb))] +pub fn svmlalb_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { + svmlalb_s16(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlalb))] +pub fn svmlalb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlalb.nxv4i32")] + fn _svmlalb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svmlalb_s32(op1, op2, op3) } +} +#[doc = "Multiply-add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlalb))] +pub fn svmlalb_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svmlalb_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlalb))] +pub fn svmlalb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlalb.nxv2i64")] + fn _svmlalb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svmlalb_s64(op1, op2, op3) } +} +#[doc = "Multiply-add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlalb))] +pub fn svmlalb_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svmlalb_s64(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlalb))] +pub fn svmlalb_u16(op1: svuint16_t, op2: svuint8_t, op3: svuint8_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlalb.nxv8i16")] + fn _svmlalb_u16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svmlalb_u16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlalb))] +pub fn svmlalb_n_u16(op1: svuint16_t, op2: svuint8_t, op3: u8) -> svuint16_t { + svmlalb_u16(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlalb))] +pub fn svmlalb_u32(op1: svuint32_t, op2: svuint16_t, op3: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlalb.nxv4i32")] + fn _svmlalb_u32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svmlalb_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlalb))] +pub fn svmlalb_n_u32(op1: svuint32_t, op2: svuint16_t, op3: u16) -> svuint32_t { + svmlalb_u32(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlalb))] +pub fn svmlalb_u64(op1: svuint64_t, op2: svuint32_t, op3: svuint32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlalb.nxv2i64")] + fn _svmlalb_u64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svmlalb_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlalb))] +pub fn svmlalb_n_u64(op1: svuint64_t, op2: svuint32_t, op3: u32) -> svuint64_t { + svmlalb_u64(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlalt, IMM_INDEX = 0))] +pub fn svmlalt_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.smlalt.lane.nxv4i32" + )] + fn _svmlalt_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { _svmlalt_lane_s32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt_lane[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlalt, IMM_INDEX = 0))] +pub fn svmlalt_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, +) -> svint64_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.smlalt.lane.nxv2i64" + )] + fn _svmlalt_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { _svmlalt_lane_s64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt_lane[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlalt, IMM_INDEX = 0))] +pub fn svmlalt_lane_u32( + op1: svuint32_t, + op2: svuint16_t, + op3: svuint16_t, +) -> svuint32_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.umlalt.lane.nxv4i32" + )] + fn _svmlalt_lane_u32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { + _svmlalt_lane_u32(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX) + .as_unsigned() + } +} +#[doc = "Multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt_lane[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlalt, IMM_INDEX = 0))] +pub fn svmlalt_lane_u64( + op1: svuint64_t, + op2: svuint32_t, + op3: svuint32_t, +) -> svuint64_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.umlalt.lane.nxv2i64" + )] + fn _svmlalt_lane_u64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { + _svmlalt_lane_u64(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX) + .as_unsigned() + } +} +#[doc = "Multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlalt))] +pub fn svmlalt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlalt.nxv8i16")] + fn _svmlalt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svmlalt_s16(op1, op2, op3) } +} +#[doc = "Multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlalt))] +pub fn svmlalt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { + svmlalt_s16(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlalt))] +pub fn svmlalt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlalt.nxv4i32")] + fn _svmlalt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svmlalt_s32(op1, op2, op3) } +} +#[doc = "Multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlalt))] +pub fn svmlalt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svmlalt_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlalt))] +pub fn svmlalt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlalt.nxv2i64")] + fn _svmlalt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svmlalt_s64(op1, op2, op3) } +} +#[doc = "Multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlalt))] +pub fn svmlalt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svmlalt_s64(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlalt))] +pub fn svmlalt_u16(op1: svuint16_t, op2: svuint8_t, op3: svuint8_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlalt.nxv8i16")] + fn _svmlalt_u16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svmlalt_u16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlalt))] +pub fn svmlalt_n_u16(op1: svuint16_t, op2: svuint8_t, op3: u8) -> svuint16_t { + svmlalt_u16(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlalt))] +pub fn svmlalt_u32(op1: svuint32_t, op2: svuint16_t, op3: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlalt.nxv4i32")] + fn _svmlalt_u32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svmlalt_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlalt))] +pub fn svmlalt_n_u32(op1: svuint32_t, op2: svuint16_t, op3: u16) -> svuint32_t { + svmlalt_u32(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlalt))] +pub fn svmlalt_u64(op1: svuint64_t, op2: svuint32_t, op3: svuint32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlalt.nxv2i64")] + fn _svmlalt_u64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svmlalt_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlalt))] +pub fn svmlalt_n_u64(op1: svuint64_t, op2: svuint32_t, op3: u32) -> svuint64_t { + svmlalt_u64(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls, IMM_INDEX = 0))] +pub fn svmls_lane_s16( + op1: svint16_t, + op2: svint16_t, + op3: svint16_t, +) -> svint16_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.mls.lane.nxv8i16" + )] + fn _svmls_lane_s16( + op1: svint16_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint16_t; + } + unsafe { _svmls_lane_s16(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls, IMM_INDEX = 0))] +pub fn svmls_lane_s32( + op1: svint32_t, + op2: svint32_t, + op3: svint32_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.mls.lane.nxv4i32" + )] + fn _svmls_lane_s32( + op1: svint32_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { _svmls_lane_s32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls, IMM_INDEX = 0))] +pub fn svmls_lane_s64( + op1: svint64_t, + op2: svint64_t, + op3: svint64_t, +) -> svint64_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.mls.lane.nxv2i64" + )] + fn _svmls_lane_s64( + op1: svint64_t, + op2: svint64_t, + op3: svint64_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { _svmls_lane_s64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls, IMM_INDEX = 0))] +pub fn svmls_lane_u16( + op1: svuint16_t, + op2: svuint16_t, + op3: svuint16_t, +) -> svuint16_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe { + svmls_lane_s16::(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() + } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls, IMM_INDEX = 0))] +pub fn svmls_lane_u32( + op1: svuint32_t, + op2: svuint32_t, + op3: svuint32_t, +) -> svuint32_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe { + svmls_lane_s32::(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() + } +} +#[doc = "Multiply-subtract, minuend first"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mls, IMM_INDEX = 0))] +pub fn svmls_lane_u64( + op1: svuint64_t, + op2: svuint64_t, + op3: svuint64_t, +) -> svuint64_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe { + svmls_lane_s64::(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() + } +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlslb, IMM_INDEX = 0))] +pub fn svmlslb_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.smlslb.lane.nxv4i32" + )] + fn _svmlslb_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { _svmlslb_lane_s32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb_lane[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlslb, IMM_INDEX = 0))] +pub fn svmlslb_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, +) -> svint64_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.smlslb.lane.nxv2i64" + )] + fn _svmlslb_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { _svmlslb_lane_s64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb_lane[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlslb, IMM_INDEX = 0))] +pub fn svmlslb_lane_u32( + op1: svuint32_t, + op2: svuint16_t, + op3: svuint16_t, +) -> svuint32_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.umlslb.lane.nxv4i32" + )] + fn _svmlslb_lane_u32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { + _svmlslb_lane_u32(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX) + .as_unsigned() + } +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb_lane[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlslb, IMM_INDEX = 0))] +pub fn svmlslb_lane_u64( + op1: svuint64_t, + op2: svuint32_t, + op3: svuint32_t, +) -> svuint64_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.umlslb.lane.nxv2i64" + )] + fn _svmlslb_lane_u64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { + _svmlslb_lane_u64(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX) + .as_unsigned() + } +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlslb))] +pub fn svmlslb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlslb.nxv8i16")] + fn _svmlslb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svmlslb_s16(op1, op2, op3) } +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlslb))] +pub fn svmlslb_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { + svmlslb_s16(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlslb))] +pub fn svmlslb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlslb.nxv4i32")] + fn _svmlslb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svmlslb_s32(op1, op2, op3) } +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlslb))] +pub fn svmlslb_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svmlslb_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlslb))] +pub fn svmlslb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlslb.nxv2i64")] + fn _svmlslb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svmlslb_s64(op1, op2, op3) } +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlslb))] +pub fn svmlslb_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svmlslb_s64(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlslb))] +pub fn svmlslb_u16(op1: svuint16_t, op2: svuint8_t, op3: svuint8_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlslb.nxv8i16")] + fn _svmlslb_u16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svmlslb_u16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlslb))] +pub fn svmlslb_n_u16(op1: svuint16_t, op2: svuint8_t, op3: u8) -> svuint16_t { + svmlslb_u16(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlslb))] +pub fn svmlslb_u32(op1: svuint32_t, op2: svuint16_t, op3: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlslb.nxv4i32")] + fn _svmlslb_u32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svmlslb_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlslb))] +pub fn svmlslb_n_u32(op1: svuint32_t, op2: svuint16_t, op3: u16) -> svuint32_t { + svmlslb_u32(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlslb))] +pub fn svmlslb_u64(op1: svuint64_t, op2: svuint32_t, op3: svuint32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlslb.nxv2i64")] + fn _svmlslb_u64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svmlslb_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlslb))] +pub fn svmlslb_n_u64(op1: svuint64_t, op2: svuint32_t, op3: u32) -> svuint64_t { + svmlslb_u64(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlslt, IMM_INDEX = 0))] +pub fn svmlslt_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.smlslt.lane.nxv4i32" + )] + fn _svmlslt_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { _svmlslt_lane_s32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt_lane[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlslt, IMM_INDEX = 0))] +pub fn svmlslt_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, +) -> svint64_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.smlslt.lane.nxv2i64" + )] + fn _svmlslt_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { _svmlslt_lane_s64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt_lane[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlslt, IMM_INDEX = 0))] +pub fn svmlslt_lane_u32( + op1: svuint32_t, + op2: svuint16_t, + op3: svuint16_t, +) -> svuint32_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.umlslt.lane.nxv4i32" + )] + fn _svmlslt_lane_u32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { + _svmlslt_lane_u32(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX) + .as_unsigned() + } +} +#[doc = "Multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt_lane[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlslt, IMM_INDEX = 0))] +pub fn svmlslt_lane_u64( + op1: svuint64_t, + op2: svuint32_t, + op3: svuint32_t, +) -> svuint64_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.umlslt.lane.nxv2i64" + )] + fn _svmlslt_lane_u64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { + _svmlslt_lane_u64(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX) + .as_unsigned() + } +} +#[doc = "Multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlslt))] +pub fn svmlslt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlslt.nxv8i16")] + fn _svmlslt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svmlslt_s16(op1, op2, op3) } +} +#[doc = "Multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlslt))] +pub fn svmlslt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { + svmlslt_s16(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlslt))] +pub fn svmlslt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlslt.nxv4i32")] + fn _svmlslt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svmlslt_s32(op1, op2, op3) } +} +#[doc = "Multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlslt))] +pub fn svmlslt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svmlslt_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlslt))] +pub fn svmlslt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlslt.nxv2i64")] + fn _svmlslt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svmlslt_s64(op1, op2, op3) } +} +#[doc = "Multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smlslt))] +pub fn svmlslt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svmlslt_s64(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlslt))] +pub fn svmlslt_u16(op1: svuint16_t, op2: svuint8_t, op3: svuint8_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlslt.nxv8i16")] + fn _svmlslt_u16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svmlslt_u16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlslt))] +pub fn svmlslt_n_u16(op1: svuint16_t, op2: svuint8_t, op3: u8) -> svuint16_t { + svmlslt_u16(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlslt))] +pub fn svmlslt_u32(op1: svuint32_t, op2: svuint16_t, op3: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlslt.nxv4i32")] + fn _svmlslt_u32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svmlslt_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlslt))] +pub fn svmlslt_n_u32(op1: svuint32_t, op2: svuint16_t, op3: u16) -> svuint32_t { + svmlslt_u32(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlslt))] +pub fn svmlslt_u64(op1: svuint64_t, op2: svuint32_t, op3: svuint32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlslt.nxv2i64")] + fn _svmlslt_u64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svmlslt_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umlslt))] +pub fn svmlslt_n_u64(op1: svuint64_t, op2: svuint32_t, op3: u32) -> svuint64_t { + svmlslt_u64(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Move long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sshllb))] +pub fn svmovlb_s16(op: svint8_t) -> svint16_t { + svshllb_n_s16::<0>(op) +} +#[doc = "Move long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sshllb))] +pub fn svmovlb_s32(op: svint16_t) -> svint32_t { + svshllb_n_s32::<0>(op) +} +#[doc = "Move long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sshllb))] +pub fn svmovlb_s64(op: svint32_t) -> svint64_t { + svshllb_n_s64::<0>(op) +} +#[doc = "Move long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlb[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ushllb))] +pub fn svmovlb_u16(op: svuint8_t) -> svuint16_t { + svshllb_n_u16::<0>(op) +} +#[doc = "Move long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ushllb))] +pub fn svmovlb_u32(op: svuint16_t) -> svuint32_t { + svshllb_n_u32::<0>(op) +} +#[doc = "Move long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ushllb))] +pub fn svmovlb_u64(op: svuint32_t) -> svuint64_t { + svshllb_n_u64::<0>(op) +} +#[doc = "Move long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sshllt))] +pub fn svmovlt_s16(op: svint8_t) -> svint16_t { + svshllt_n_s16::<0>(op) +} +#[doc = "Move long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sshllt))] +pub fn svmovlt_s32(op: svint16_t) -> svint32_t { + svshllt_n_s32::<0>(op) +} +#[doc = "Move long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sshllt))] +pub fn svmovlt_s64(op: svint32_t) -> svint64_t { + svshllt_n_s64::<0>(op) +} +#[doc = "Move long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlt[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ushllt))] +pub fn svmovlt_u16(op: svuint8_t) -> svuint16_t { + svshllt_n_u16::<0>(op) +} +#[doc = "Move long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlt[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ushllt))] +pub fn svmovlt_u32(op: svuint16_t) -> svuint32_t { + svshllt_n_u32::<0>(op) +} +#[doc = "Move long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ushllt))] +pub fn svmovlt_u64(op: svuint32_t) -> svuint64_t { + svshllt_n_u64::<0>(op) +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmul, IMM_INDEX = 0))] +pub fn svmul_lane_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fmul.lane.nxv4f32" + )] + fn _svmul_lane_f32(op1: svfloat32_t, op2: svfloat32_t, imm_index: i32) -> svfloat32_t; + } + unsafe { _svmul_lane_f32(op1, op2, IMM_INDEX) } +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(fmul, IMM_INDEX = 0))] +pub fn svmul_lane_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.fmul.lane.nxv2f64" + )] + fn _svmul_lane_f64(op1: svfloat64_t, op2: svfloat64_t, imm_index: i32) -> svfloat64_t; + } + unsafe { _svmul_lane_f64(op1, op2, IMM_INDEX) } +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul, IMM_INDEX = 0))] +pub fn svmul_lane_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.mul.lane.nxv8i16" + )] + fn _svmul_lane_s16(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint16_t; + } + unsafe { _svmul_lane_s16(op1, op2, IMM_INDEX) } +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul, IMM_INDEX = 0))] +pub fn svmul_lane_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.mul.lane.nxv4i32" + )] + fn _svmul_lane_s32(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint32_t; + } + unsafe { _svmul_lane_s32(op1, op2, IMM_INDEX) } +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul, IMM_INDEX = 0))] +pub fn svmul_lane_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.mul.lane.nxv2i64" + )] + fn _svmul_lane_s64(op1: svint64_t, op2: svint64_t, imm_index: i32) -> svint64_t; + } + unsafe { _svmul_lane_s64(op1, op2, IMM_INDEX) } +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul, IMM_INDEX = 0))] +pub fn svmul_lane_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe { svmul_lane_s16::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul, IMM_INDEX = 0))] +pub fn svmul_lane_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe { svmul_lane_s32::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(mul, IMM_INDEX = 0))] +pub fn svmul_lane_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe { svmul_lane_s64::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smullb, IMM_INDEX = 0))] +pub fn svmullb_lane_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.smullb.lane.nxv4i32" + )] + fn _svmullb_lane_s32(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint32_t; + } + unsafe { _svmullb_lane_s32(op1, op2, IMM_INDEX) } +} +#[doc = "Multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb_lane[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smullb, IMM_INDEX = 0))] +pub fn svmullb_lane_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.smullb.lane.nxv2i64" + )] + fn _svmullb_lane_s64(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint64_t; + } + unsafe { _svmullb_lane_s64(op1, op2, IMM_INDEX) } +} +#[doc = "Multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb_lane[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umullb, IMM_INDEX = 0))] +pub fn svmullb_lane_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.umullb.lane.nxv4i32" + )] + fn _svmullb_lane_u32(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint32_t; + } + unsafe { _svmullb_lane_u32(op1.as_signed(), op2.as_signed(), IMM_INDEX).as_unsigned() } +} +#[doc = "Multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb_lane[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umullb, IMM_INDEX = 0))] +pub fn svmullb_lane_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.umullb.lane.nxv2i64" + )] + fn _svmullb_lane_u64(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint64_t; + } + unsafe { _svmullb_lane_u64(op1.as_signed(), op2.as_signed(), IMM_INDEX).as_unsigned() } +} +#[doc = "Multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smullb))] +pub fn svmullb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smullb.nxv8i16")] + fn _svmullb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svmullb_s16(op1, op2) } +} +#[doc = "Multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smullb))] +pub fn svmullb_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svmullb_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smullb))] +pub fn svmullb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smullb.nxv4i32")] + fn _svmullb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svmullb_s32(op1, op2) } +} +#[doc = "Multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smullb))] +pub fn svmullb_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svmullb_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smullb))] +pub fn svmullb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smullb.nxv2i64")] + fn _svmullb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svmullb_s64(op1, op2) } +} +#[doc = "Multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smullb))] +pub fn svmullb_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svmullb_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umullb))] +pub fn svmullb_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umullb.nxv8i16")] + fn _svmullb_u16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svmullb_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umullb))] +pub fn svmullb_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { + svmullb_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umullb))] +pub fn svmullb_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umullb.nxv4i32")] + fn _svmullb_u32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svmullb_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umullb))] +pub fn svmullb_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t { + svmullb_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umullb))] +pub fn svmullb_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umullb.nxv2i64")] + fn _svmullb_u64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svmullb_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umullb))] +pub fn svmullb_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { + svmullb_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smullt, IMM_INDEX = 0))] +pub fn svmullt_lane_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.smullt.lane.nxv4i32" + )] + fn _svmullt_lane_s32(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint32_t; + } + unsafe { _svmullt_lane_s32(op1, op2, IMM_INDEX) } +} +#[doc = "Multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt_lane[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smullt, IMM_INDEX = 0))] +pub fn svmullt_lane_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.smullt.lane.nxv2i64" + )] + fn _svmullt_lane_s64(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint64_t; + } + unsafe { _svmullt_lane_s64(op1, op2, IMM_INDEX) } +} +#[doc = "Multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt_lane[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umullt, IMM_INDEX = 0))] +pub fn svmullt_lane_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.umullt.lane.nxv4i32" + )] + fn _svmullt_lane_u32(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint32_t; + } + unsafe { _svmullt_lane_u32(op1.as_signed(), op2.as_signed(), IMM_INDEX).as_unsigned() } +} +#[doc = "Multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt_lane[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umullt, IMM_INDEX = 0))] +pub fn svmullt_lane_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.umullt.lane.nxv2i64" + )] + fn _svmullt_lane_u64(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint64_t; + } + unsafe { _svmullt_lane_u64(op1.as_signed(), op2.as_signed(), IMM_INDEX).as_unsigned() } +} +#[doc = "Multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smullt))] +pub fn svmullt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smullt.nxv8i16")] + fn _svmullt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svmullt_s16(op1, op2) } +} +#[doc = "Multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smullt))] +pub fn svmullt_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svmullt_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smullt))] +pub fn svmullt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smullt.nxv4i32")] + fn _svmullt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svmullt_s32(op1, op2) } +} +#[doc = "Multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smullt))] +pub fn svmullt_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svmullt_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smullt))] +pub fn svmullt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smullt.nxv2i64")] + fn _svmullt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svmullt_s64(op1, op2) } +} +#[doc = "Multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(smullt))] +pub fn svmullt_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svmullt_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umullt))] +pub fn svmullt_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umullt.nxv8i16")] + fn _svmullt_u16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svmullt_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umullt))] +pub fn svmullt_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { + svmullt_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umullt))] +pub fn svmullt_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umullt.nxv4i32")] + fn _svmullt_u32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svmullt_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umullt))] +pub fn svmullt_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t { + svmullt_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umullt))] +pub fn svmullt_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umullt.nxv2i64")] + fn _svmullt_u64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svmullt_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(umullt))] +pub fn svmullt_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { + svmullt_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nbsl.nxv16i8")] + fn _svnbsl_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _svnbsl_s8(op1, op2, op3) } +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svnbsl_s8(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nbsl.nxv8i16")] + fn _svnbsl_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t; + } + unsafe { _svnbsl_s16(op1, op2, op3) } +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svnbsl_s16(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nbsl.nxv4i32")] + fn _svnbsl_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _svnbsl_s32(op1, op2, op3) } +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svnbsl_s32(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nbsl.nxv2i64")] + fn _svnbsl_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + unsafe { _svnbsl_s64(op1, op2, op3) } +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svnbsl_s64(op1, op2, svdup_n_s64(op3)) +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { + unsafe { svnbsl_s8(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { + svnbsl_u8(op1, op2, svdup_n_u8(op3)) +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t { + unsafe { svnbsl_s16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { + svnbsl_u16(op1, op2, svdup_n_u16(op3)) +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe { svnbsl_s32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svnbsl_u32(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe { svnbsl_s64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Bitwise select"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nbsl))] +pub fn svnbsl_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svnbsl_u64(op1, op2, svdup_n_u64(op3)) +} +#[doc = "Detect no matching elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmatch[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nmatch))] +pub fn svnmatch_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nmatch.nxv16i8")] + fn _svnmatch_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t; + } + unsafe { _svnmatch_s8(pg, op1, op2) } +} +#[doc = "Detect no matching elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmatch[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nmatch))] +pub fn svnmatch_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nmatch.nxv8i16")] + fn _svnmatch_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t; + } + unsafe { _svnmatch_s16(pg.sve_into(), op1, op2).sve_into() } +} +#[doc = "Detect no matching elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmatch[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nmatch))] +pub fn svnmatch_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { + unsafe { svnmatch_s8(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Detect no matching elements"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmatch[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(nmatch))] +pub fn svnmatch_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { + unsafe { svnmatch_s16(pg, op1.as_signed(), op2.as_signed()) } +} +#[doc = "Polynomial multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmul[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmul))] +pub fn svpmul_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.pmul.nxv16i8")] + fn _svpmul_u8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svpmul_u8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Polynomial multiply"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmul[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmul))] +pub fn svpmul_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t { + svpmul_u8(op1, svdup_n_u8(op2)) +} +#[doc = "Polynomial multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb_pair[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullb))] +pub fn svpmullb_pair_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.pmullb.pair.nxv16i8" + )] + fn _svpmullb_pair_u8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svpmullb_pair_u8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Polynomial multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb_pair[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullb))] +pub fn svpmullb_pair_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t { + svpmullb_pair_u8(op1, svdup_n_u8(op2)) +} +#[doc = "Polynomial multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb_pair[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullb))] +pub fn svpmullb_pair_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.pmullb.pair.nxv4i32" + )] + fn _svpmullb_pair_u32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svpmullb_pair_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Polynomial multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb_pair[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullb))] +pub fn svpmullb_pair_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t { + svpmullb_pair_u32(op1, svdup_n_u32(op2)) +} +#[doc = "Polynomial multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb_pair[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullb))] +pub fn svpmullb_pair_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.pmullb.pair.nxv2i64" + )] + fn _svpmullb_pair_u64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svpmullb_pair_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Polynomial multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb_pair[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullb))] +pub fn svpmullb_pair_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t { + svpmullb_pair_u64(op1, svdup_n_u64(op2)) +} +#[doc = "Polynomial multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullb))] +pub fn svpmullb_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { + unsafe { crate::intrinsics::transmute_unchecked(svpmullb_pair_u8(op1, op2)) } +} +#[doc = "Polynomial multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullb))] +pub fn svpmullb_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { + svpmullb_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Polynomial multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullb))] +pub fn svpmullb_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + unsafe { crate::intrinsics::transmute_unchecked(svpmullb_pair_u32(op1, op2)) } +} +#[doc = "Polynomial multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullb))] +pub fn svpmullb_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { + svpmullb_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Polynomial multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt_pair[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullt))] +pub fn svpmullt_pair_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.pmullt.pair.nxv16i8" + )] + fn _svpmullt_pair_u8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svpmullt_pair_u8(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Polynomial multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt_pair[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullt))] +pub fn svpmullt_pair_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t { + svpmullt_pair_u8(op1, svdup_n_u8(op2)) +} +#[doc = "Polynomial multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt_pair[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullt))] +pub fn svpmullt_pair_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.pmullt.pair.nxv4i32" + )] + fn _svpmullt_pair_u32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svpmullt_pair_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Polynomial multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt_pair[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullt))] +pub fn svpmullt_pair_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t { + svpmullt_pair_u32(op1, svdup_n_u32(op2)) +} +#[doc = "Polynomial multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt_pair[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullt))] +pub fn svpmullt_pair_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.pmullt.pair.nxv2i64" + )] + fn _svpmullt_pair_u64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svpmullt_pair_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Polynomial multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt_pair[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullt))] +pub fn svpmullt_pair_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t { + svpmullt_pair_u64(op1, svdup_n_u64(op2)) +} +#[doc = "Polynomial multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullt))] +pub fn svpmullt_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { + unsafe { crate::intrinsics::transmute_unchecked(svpmullt_pair_u8(op1, op2)) } +} +#[doc = "Polynomial multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullt))] +pub fn svpmullt_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { + svpmullt_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Polynomial multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullt))] +pub fn svpmullt_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + unsafe { crate::intrinsics::transmute_unchecked(svpmullt_pair_u32(op1, op2)) } +} +#[doc = "Polynomial multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-aes")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(pmullt))] +pub fn svpmullt_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { + svpmullt_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Saturating absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqabs.nxv16i8")] + fn _svqabs_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t; + } + unsafe { _svqabs_s8_m(inactive, pg, op) } +} +#[doc = "Saturating absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t { + svqabs_s8_m(op, pg, op) +} +#[doc = "Saturating absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t { + svqabs_s8_m(svdup_n_s8(0), pg, op) +} +#[doc = "Saturating absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqabs.nxv8i16")] + fn _svqabs_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svqabs_s16_m(inactive, pg.sve_into(), op) } +} +#[doc = "Saturating absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t { + svqabs_s16_m(op, pg, op) +} +#[doc = "Saturating absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t { + svqabs_s16_m(svdup_n_s16(0), pg, op) +} +#[doc = "Saturating absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqabs.nxv4i32")] + fn _svqabs_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svqabs_s32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Saturating absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { + svqabs_s32_m(op, pg, op) +} +#[doc = "Saturating absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { + svqabs_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Saturating absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqabs.nxv2i64")] + fn _svqabs_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svqabs_s64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Saturating absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svqabs_s64_m(op, pg, op) +} +#[doc = "Saturating absolute value"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqabs))] +pub fn svqabs_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svqabs_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqadd.nxv16i8")] + fn _svqadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqadd_s8_m(pg, op1, op2) } +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqadd_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svqadd_s8_m(pg, op1, op2) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqadd_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svqadd_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqadd_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqadd.nxv8i16")] + fn _svqadd_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqadd_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqadd_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svqadd_s16_m(pg, op1, op2) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqadd_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svqadd_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqadd_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqadd.nxv4i32")] + fn _svqadd_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqadd_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqadd_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svqadd_s32_m(pg, op1, op2) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqadd_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svqadd_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqadd_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqadd.nxv2i64")] + fn _svqadd_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqadd_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqadd_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svqadd_s64_m(pg, op1, op2) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqadd_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svqadd_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqadd))] +pub fn svqadd_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqadd_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqadd.nxv16i8")] + fn _svqadd_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqadd_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svqadd_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svqadd_u8_m(pg, op1, op2) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svqadd_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svqadd_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svqadd_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqadd.nxv8i16")] + fn _svqadd_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqadd_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svqadd_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svqadd_u16_m(pg, op1, op2) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svqadd_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svqadd_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svqadd_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqadd.nxv4i32")] + fn _svqadd_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqadd_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svqadd_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svqadd_u32_m(pg, op1, op2) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svqadd_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svqadd_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svqadd_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqadd.nxv2i64")] + fn _svqadd_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqadd_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svqadd_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svqadd_u64_m(pg, op1, op2) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svqadd_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svqadd_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Saturating add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqadd))] +pub fn svqadd_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svqadd_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Saturating complex add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqcadd[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqcadd, IMM_ROTATION = 90))] +pub fn svqcadd_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqcadd.x.nxv16i8" + )] + fn _svqcadd_s8(op1: svint8_t, op2: svint8_t, imm_rotation: i32) -> svint8_t; + } + unsafe { _svqcadd_s8(op1, op2, IMM_ROTATION) } +} +#[doc = "Saturating complex add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqcadd[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqcadd, IMM_ROTATION = 90))] +pub fn svqcadd_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqcadd.x.nxv8i16" + )] + fn _svqcadd_s16(op1: svint16_t, op2: svint16_t, imm_rotation: i32) -> svint16_t; + } + unsafe { _svqcadd_s16(op1, op2, IMM_ROTATION) } +} +#[doc = "Saturating complex add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqcadd[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqcadd, IMM_ROTATION = 90))] +pub fn svqcadd_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqcadd.x.nxv4i32" + )] + fn _svqcadd_s32(op1: svint32_t, op2: svint32_t, imm_rotation: i32) -> svint32_t; + } + unsafe { _svqcadd_s32(op1, op2, IMM_ROTATION) } +} +#[doc = "Saturating complex add with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqcadd[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqcadd, IMM_ROTATION = 90))] +pub fn svqcadd_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqcadd.x.nxv2i64" + )] + fn _svqcadd_s64(op1: svint64_t, op2: svint64_t, imm_rotation: i32) -> svint64_t; + } + unsafe { _svqcadd_s64(op1, op2, IMM_ROTATION) } +} +#[doc = "Saturating doubling multiply-add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalb, IMM_INDEX = 0))] +pub fn svqdmlalb_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalb.lane.nxv4i32" + )] + fn _svqdmlalb_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { _svqdmlalb_lane_s32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply-add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb_lane[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalb, IMM_INDEX = 0))] +pub fn svqdmlalb_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, +) -> svint64_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalb.lane.nxv2i64" + )] + fn _svqdmlalb_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { _svqdmlalb_lane_s64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply-add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalb))] +pub fn svqdmlalb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalb.nxv8i16" + )] + fn _svqdmlalb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svqdmlalb_s16(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalb))] +pub fn svqdmlalb_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { + svqdmlalb_s16(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Saturating doubling multiply-add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalb))] +pub fn svqdmlalb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalb.nxv4i32" + )] + fn _svqdmlalb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svqdmlalb_s32(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalb))] +pub fn svqdmlalb_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svqdmlalb_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Saturating doubling multiply-add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalb))] +pub fn svqdmlalb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalb.nxv2i64" + )] + fn _svqdmlalb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svqdmlalb_s64(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-add long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalb))] +pub fn svqdmlalb_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svqdmlalb_s64(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Saturating doubling multiply-add long (bottom × top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalbt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalbt))] +pub fn svqdmlalbt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalbt.nxv8i16" + )] + fn _svqdmlalbt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svqdmlalbt_s16(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-add long (bottom × top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalbt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalbt))] +pub fn svqdmlalbt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { + svqdmlalbt_s16(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Saturating doubling multiply-add long (bottom × top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalbt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalbt))] +pub fn svqdmlalbt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalbt.nxv4i32" + )] + fn _svqdmlalbt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svqdmlalbt_s32(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-add long (bottom × top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalbt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalbt))] +pub fn svqdmlalbt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svqdmlalbt_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Saturating doubling multiply-add long (bottom × top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalbt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalbt))] +pub fn svqdmlalbt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalbt.nxv2i64" + )] + fn _svqdmlalbt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svqdmlalbt_s64(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-add long (bottom × top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalbt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalbt))] +pub fn svqdmlalbt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svqdmlalbt_s64(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Saturating doubling multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalt, IMM_INDEX = 0))] +pub fn svqdmlalt_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalt.lane.nxv4i32" + )] + fn _svqdmlalt_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { _svqdmlalt_lane_s32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt_lane[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalt, IMM_INDEX = 0))] +pub fn svqdmlalt_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, +) -> svint64_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalt.lane.nxv2i64" + )] + fn _svqdmlalt_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { _svqdmlalt_lane_s64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalt))] +pub fn svqdmlalt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalt.nxv8i16" + )] + fn _svqdmlalt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svqdmlalt_s16(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalt))] +pub fn svqdmlalt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { + svqdmlalt_s16(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Saturating doubling multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalt))] +pub fn svqdmlalt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalt.nxv4i32" + )] + fn _svqdmlalt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svqdmlalt_s32(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalt))] +pub fn svqdmlalt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svqdmlalt_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Saturating doubling multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalt))] +pub fn svqdmlalt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlalt.nxv2i64" + )] + fn _svqdmlalt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svqdmlalt_s64(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-add long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlalt))] +pub fn svqdmlalt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svqdmlalt_s64(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Saturating doubling multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslb, IMM_INDEX = 0))] +pub fn svqdmlslb_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslb.lane.nxv4i32" + )] + fn _svqdmlslb_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { _svqdmlslb_lane_s32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb_lane[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslb, IMM_INDEX = 0))] +pub fn svqdmlslb_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, +) -> svint64_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslb.lane.nxv2i64" + )] + fn _svqdmlslb_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { _svqdmlslb_lane_s64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslb))] +pub fn svqdmlslb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslb.nxv8i16" + )] + fn _svqdmlslb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svqdmlslb_s16(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslb))] +pub fn svqdmlslb_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { + svqdmlslb_s16(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Saturating doubling multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslb))] +pub fn svqdmlslb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslb.nxv4i32" + )] + fn _svqdmlslb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svqdmlslb_s32(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslb))] +pub fn svqdmlslb_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svqdmlslb_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Saturating doubling multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslb))] +pub fn svqdmlslb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslb.nxv2i64" + )] + fn _svqdmlslb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svqdmlslb_s64(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslb))] +pub fn svqdmlslb_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svqdmlslb_s64(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Saturating doubling multiply-subtract long (bottom × top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslbt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslbt))] +pub fn svqdmlslbt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslbt.nxv8i16" + )] + fn _svqdmlslbt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svqdmlslbt_s16(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-subtract long (bottom × top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslbt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslbt))] +pub fn svqdmlslbt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { + svqdmlslbt_s16(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Saturating doubling multiply-subtract long (bottom × top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslbt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslbt))] +pub fn svqdmlslbt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslbt.nxv4i32" + )] + fn _svqdmlslbt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svqdmlslbt_s32(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-subtract long (bottom × top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslbt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslbt))] +pub fn svqdmlslbt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svqdmlslbt_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Saturating doubling multiply-subtract long (bottom × top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslbt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslbt))] +pub fn svqdmlslbt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslbt.nxv2i64" + )] + fn _svqdmlslbt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svqdmlslbt_s64(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-subtract long (bottom × top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslbt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslbt))] +pub fn svqdmlslbt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svqdmlslbt_s64(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Saturating doubling multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslt, IMM_INDEX = 0))] +pub fn svqdmlslt_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslt.lane.nxv4i32" + )] + fn _svqdmlslt_lane_s32( + op1: svint32_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { _svqdmlslt_lane_s32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt_lane[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslt, IMM_INDEX = 0))] +pub fn svqdmlslt_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, +) -> svint64_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslt.lane.nxv2i64" + )] + fn _svqdmlslt_lane_s64( + op1: svint64_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { _svqdmlslt_lane_s64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslt))] +pub fn svqdmlslt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslt.nxv8i16" + )] + fn _svqdmlslt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t; + } + unsafe { _svqdmlslt_s16(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslt))] +pub fn svqdmlslt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { + svqdmlslt_s16(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Saturating doubling multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslt))] +pub fn svqdmlslt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslt.nxv4i32" + )] + fn _svqdmlslt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t; + } + unsafe { _svqdmlslt_s32(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslt))] +pub fn svqdmlslt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { + svqdmlslt_s32(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Saturating doubling multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslt))] +pub fn svqdmlslt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmlslt.nxv2i64" + )] + fn _svqdmlslt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t; + } + unsafe { _svqdmlslt_s64(op1, op2, op3) } +} +#[doc = "Saturating doubling multiply-subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmlslt))] +pub fn svqdmlslt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { + svqdmlslt_s64(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Saturating doubling multiply high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh_lane[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmulh, IMM_INDEX = 0))] +pub fn svqdmulh_lane_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmulh.lane.nxv8i16" + )] + fn _svqdmulh_lane_s16(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint16_t; + } + unsafe { _svqdmulh_lane_s16(op1, op2, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmulh, IMM_INDEX = 0))] +pub fn svqdmulh_lane_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmulh.lane.nxv4i32" + )] + fn _svqdmulh_lane_s32(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint32_t; + } + unsafe { _svqdmulh_lane_s32(op1, op2, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh_lane[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmulh, IMM_INDEX = 0))] +pub fn svqdmulh_lane_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmulh.lane.nxv2i64" + )] + fn _svqdmulh_lane_s64(op1: svint64_t, op2: svint64_t, imm_index: i32) -> svint64_t; + } + unsafe { _svqdmulh_lane_s64(op1, op2, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmulh))] +pub fn svqdmulh_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmulh.nxv16i8" + )] + fn _svqdmulh_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqdmulh_s8(op1, op2) } +} +#[doc = "Saturating doubling multiply high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmulh))] +pub fn svqdmulh_n_s8(op1: svint8_t, op2: i8) -> svint8_t { + svqdmulh_s8(op1, svdup_n_s8(op2)) +} +#[doc = "Saturating doubling multiply high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmulh))] +pub fn svqdmulh_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmulh.nxv8i16" + )] + fn _svqdmulh_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqdmulh_s16(op1, op2) } +} +#[doc = "Saturating doubling multiply high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmulh))] +pub fn svqdmulh_n_s16(op1: svint16_t, op2: i16) -> svint16_t { + svqdmulh_s16(op1, svdup_n_s16(op2)) +} +#[doc = "Saturating doubling multiply high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmulh))] +pub fn svqdmulh_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmulh.nxv4i32" + )] + fn _svqdmulh_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqdmulh_s32(op1, op2) } +} +#[doc = "Saturating doubling multiply high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmulh))] +pub fn svqdmulh_n_s32(op1: svint32_t, op2: i32) -> svint32_t { + svqdmulh_s32(op1, svdup_n_s32(op2)) +} +#[doc = "Saturating doubling multiply high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmulh))] +pub fn svqdmulh_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmulh.nxv2i64" + )] + fn _svqdmulh_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqdmulh_s64(op1, op2) } +} +#[doc = "Saturating doubling multiply high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmulh))] +pub fn svqdmulh_n_s64(op1: svint64_t, op2: i64) -> svint64_t { + svqdmulh_s64(op1, svdup_n_s64(op2)) +} +#[doc = "Saturating doubling multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmullb, IMM_INDEX = 0))] +pub fn svqdmullb_lane_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmullb.lane.nxv4i32" + )] + fn _svqdmullb_lane_s32(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint32_t; + } + unsafe { _svqdmullb_lane_s32(op1, op2, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb_lane[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmullb, IMM_INDEX = 0))] +pub fn svqdmullb_lane_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmullb.lane.nxv2i64" + )] + fn _svqdmullb_lane_s64(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint64_t; + } + unsafe { _svqdmullb_lane_s64(op1, op2, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmullb))] +pub fn svqdmullb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmullb.nxv8i16" + )] + fn _svqdmullb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svqdmullb_s16(op1, op2) } +} +#[doc = "Saturating doubling multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmullb))] +pub fn svqdmullb_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svqdmullb_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Saturating doubling multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmullb))] +pub fn svqdmullb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmullb.nxv4i32" + )] + fn _svqdmullb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svqdmullb_s32(op1, op2) } +} +#[doc = "Saturating doubling multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmullb))] +pub fn svqdmullb_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svqdmullb_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Saturating doubling multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmullb))] +pub fn svqdmullb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmullb.nxv2i64" + )] + fn _svqdmullb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svqdmullb_s64(op1, op2) } +} +#[doc = "Saturating doubling multiply long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmullb))] +pub fn svqdmullb_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svqdmullb_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Saturating doubling multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmullt, IMM_INDEX = 0))] +pub fn svqdmullt_lane_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmullt.lane.nxv4i32" + )] + fn _svqdmullt_lane_s32(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint32_t; + } + unsafe { _svqdmullt_lane_s32(op1, op2, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt_lane[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmullt, IMM_INDEX = 0))] +pub fn svqdmullt_lane_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmullt.lane.nxv2i64" + )] + fn _svqdmullt_lane_s64(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint64_t; + } + unsafe { _svqdmullt_lane_s64(op1, op2, IMM_INDEX) } +} +#[doc = "Saturating doubling multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmullt))] +pub fn svqdmullt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmullt.nxv8i16" + )] + fn _svqdmullt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svqdmullt_s16(op1, op2) } +} +#[doc = "Saturating doubling multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmullt))] +pub fn svqdmullt_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svqdmullt_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Saturating doubling multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmullt))] +pub fn svqdmullt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmullt.nxv4i32" + )] + fn _svqdmullt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svqdmullt_s32(op1, op2) } +} +#[doc = "Saturating doubling multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmullt))] +pub fn svqdmullt_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svqdmullt_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Saturating doubling multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmullt))] +pub fn svqdmullt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqdmullt.nxv2i64" + )] + fn _svqdmullt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svqdmullt_s64(op1, op2) } +} +#[doc = "Saturating doubling multiply long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqdmullt))] +pub fn svqdmullt_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svqdmullt_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqneg))] +pub fn svqneg_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqneg.nxv16i8")] + fn _svqneg_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t; + } + unsafe { _svqneg_s8_m(inactive, pg, op) } +} +#[doc = "Saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqneg))] +pub fn svqneg_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t { + svqneg_s8_m(op, pg, op) +} +#[doc = "Saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqneg))] +pub fn svqneg_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t { + svqneg_s8_m(svdup_n_s8(0), pg, op) +} +#[doc = "Saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqneg))] +pub fn svqneg_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqneg.nxv8i16")] + fn _svqneg_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t; + } + unsafe { _svqneg_s16_m(inactive, pg.sve_into(), op) } +} +#[doc = "Saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqneg))] +pub fn svqneg_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t { + svqneg_s16_m(op, pg, op) +} +#[doc = "Saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqneg))] +pub fn svqneg_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t { + svqneg_s16_m(svdup_n_s16(0), pg, op) +} +#[doc = "Saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqneg))] +pub fn svqneg_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqneg.nxv4i32")] + fn _svqneg_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svqneg_s32_m(inactive, pg.sve_into(), op) } +} +#[doc = "Saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqneg))] +pub fn svqneg_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { + svqneg_s32_m(op, pg, op) +} +#[doc = "Saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqneg))] +pub fn svqneg_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { + svqneg_s32_m(svdup_n_s32(0), pg, op) +} +#[doc = "Saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqneg))] +pub fn svqneg_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqneg.nxv2i64")] + fn _svqneg_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t; + } + unsafe { _svqneg_s64_m(inactive, pg.sve_into(), op) } +} +#[doc = "Saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqneg))] +pub fn svqneg_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { + svqneg_s64_m(op, pg, op) +} +#[doc = "Saturating negate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqneg))] +pub fn svqneg_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { + svqneg_s64_m(svdup_n_s64(0), pg, op) +} +#[doc = "Saturating rounding doubling complex multiply-add high with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdcmlah_lane[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrdcmlah, IMM_INDEX = 0, IMM_ROTATION = 90))] +pub fn svqrdcmlah_lane_s16( + op1: svint16_t, + op2: svint16_t, + op3: svint16_t, +) -> svint16_t { + static_assert_range!(IMM_INDEX, 0..=3); + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdcmlah.lane.x.nxv8i16" + )] + fn _svqrdcmlah_lane_s16( + op1: svint16_t, + op2: svint16_t, + op3: svint16_t, + imm_index: i32, + imm_rotation: i32, + ) -> svint16_t; + } + unsafe { _svqrdcmlah_lane_s16(op1, op2, op3, IMM_INDEX, IMM_ROTATION) } +} +#[doc = "Saturating rounding doubling complex multiply-add high with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdcmlah_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrdcmlah, IMM_INDEX = 0, IMM_ROTATION = 90))] +pub fn svqrdcmlah_lane_s32( + op1: svint32_t, + op2: svint32_t, + op3: svint32_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=1); + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdcmlah.lane.x.nxv4i32" + )] + fn _svqrdcmlah_lane_s32( + op1: svint32_t, + op2: svint32_t, + op3: svint32_t, + imm_index: i32, + imm_rotation: i32, + ) -> svint32_t; + } + unsafe { _svqrdcmlah_lane_s32(op1, op2, op3, IMM_INDEX, IMM_ROTATION) } +} +#[doc = "Saturating rounding doubling complex multiply-add high with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdcmlah[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrdcmlah, IMM_ROTATION = 90))] +pub fn svqrdcmlah_s8( + op1: svint8_t, + op2: svint8_t, + op3: svint8_t, +) -> svint8_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdcmlah.x.nxv16i8" + )] + fn _svqrdcmlah_s8( + op1: svint8_t, + op2: svint8_t, + op3: svint8_t, + imm_rotation: i32, + ) -> svint8_t; + } + unsafe { _svqrdcmlah_s8(op1, op2, op3, IMM_ROTATION) } +} +#[doc = "Saturating rounding doubling complex multiply-add high with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdcmlah[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrdcmlah, IMM_ROTATION = 90))] +pub fn svqrdcmlah_s16( + op1: svint16_t, + op2: svint16_t, + op3: svint16_t, +) -> svint16_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdcmlah.x.nxv8i16" + )] + fn _svqrdcmlah_s16( + op1: svint16_t, + op2: svint16_t, + op3: svint16_t, + imm_rotation: i32, + ) -> svint16_t; + } + unsafe { _svqrdcmlah_s16(op1, op2, op3, IMM_ROTATION) } +} +#[doc = "Saturating rounding doubling complex multiply-add high with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdcmlah[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrdcmlah, IMM_ROTATION = 90))] +pub fn svqrdcmlah_s32( + op1: svint32_t, + op2: svint32_t, + op3: svint32_t, +) -> svint32_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdcmlah.x.nxv4i32" + )] + fn _svqrdcmlah_s32( + op1: svint32_t, + op2: svint32_t, + op3: svint32_t, + imm_rotation: i32, + ) -> svint32_t; + } + unsafe { _svqrdcmlah_s32(op1, op2, op3, IMM_ROTATION) } +} +#[doc = "Saturating rounding doubling complex multiply-add high with rotate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdcmlah[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrdcmlah, IMM_ROTATION = 90))] +pub fn svqrdcmlah_s64( + op1: svint64_t, + op2: svint64_t, + op3: svint64_t, +) -> svint64_t { + static_assert!( + IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270 + ); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdcmlah.x.nxv2i64" + )] + fn _svqrdcmlah_s64( + op1: svint64_t, + op2: svint64_t, + op3: svint64_t, + imm_rotation: i32, + ) -> svint64_t; + } + unsafe { _svqrdcmlah_s64(op1, op2, op3, IMM_ROTATION) } +} +#[doc = "Saturating rounding doubling multiply-add high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah_lane[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrdmlah, IMM_INDEX = 0))] +pub fn svqrdmlah_lane_s16( + op1: svint16_t, + op2: svint16_t, + op3: svint16_t, +) -> svint16_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdmlah.lane.nxv8i16" + )] + fn _svqrdmlah_lane_s16( + op1: svint16_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint16_t; + } + unsafe { _svqrdmlah_lane_s16(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Saturating rounding doubling multiply-add high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrdmlah, IMM_INDEX = 0))] +pub fn svqrdmlah_lane_s32( + op1: svint32_t, + op2: svint32_t, + op3: svint32_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdmlah.lane.nxv4i32" + )] + fn _svqrdmlah_lane_s32( + op1: svint32_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { _svqrdmlah_lane_s32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Saturating rounding doubling multiply-add high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah_lane[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrdmlah, IMM_INDEX = 0))] +pub fn svqrdmlah_lane_s64( + op1: svint64_t, + op2: svint64_t, + op3: svint64_t, +) -> svint64_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdmlah.lane.nxv2i64" + )] + fn _svqrdmlah_lane_s64( + op1: svint64_t, + op2: svint64_t, + op3: svint64_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { _svqrdmlah_lane_s64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Saturating rounding doubling multiply-add high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrdmlah))] +pub fn svqrdmlah_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdmlah.nxv16i8" + )] + fn _svqrdmlah_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _svqrdmlah_s8(op1, op2, op3) } +} +#[doc = "Saturating rounding doubling multiply-add high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrdmlah))] +pub fn svqrdmlah_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svqrdmlah_s8(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Saturating rounding doubling multiply-add high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrdmlah))] +pub fn svqrdmlah_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdmlah.nxv8i16" + )] + fn _svqrdmlah_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t; + } + unsafe { _svqrdmlah_s16(op1, op2, op3) } +} +#[doc = "Saturating rounding doubling multiply-add high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrdmlah))] +pub fn svqrdmlah_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svqrdmlah_s16(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Saturating rounding doubling multiply-add high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrdmlah))] +pub fn svqrdmlah_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdmlah.nxv4i32" + )] + fn _svqrdmlah_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _svqrdmlah_s32(op1, op2, op3) } +} +#[doc = "Saturating rounding doubling multiply-add high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrdmlah))] +pub fn svqrdmlah_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svqrdmlah_s32(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Saturating rounding doubling multiply-add high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrdmlah))] +pub fn svqrdmlah_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdmlah.nxv2i64" + )] + fn _svqrdmlah_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + unsafe { _svqrdmlah_s64(op1, op2, op3) } +} +#[doc = "Saturating rounding doubling multiply-add high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrdmlah))] +pub fn svqrdmlah_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svqrdmlah_s64(op1, op2, svdup_n_s64(op3)) +} +#[doc = "Saturating rounding doubling multiply-subtract high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh_lane[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrdmlsh, IMM_INDEX = 0))] +pub fn svqrdmlsh_lane_s16( + op1: svint16_t, + op2: svint16_t, + op3: svint16_t, +) -> svint16_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdmlsh.lane.nxv8i16" + )] + fn _svqrdmlsh_lane_s16( + op1: svint16_t, + op2: svint16_t, + op3: svint16_t, + IMM_INDEX: i32, + ) -> svint16_t; + } + unsafe { _svqrdmlsh_lane_s16(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Saturating rounding doubling multiply-subtract high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrdmlsh, IMM_INDEX = 0))] +pub fn svqrdmlsh_lane_s32( + op1: svint32_t, + op2: svint32_t, + op3: svint32_t, +) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdmlsh.lane.nxv4i32" + )] + fn _svqrdmlsh_lane_s32( + op1: svint32_t, + op2: svint32_t, + op3: svint32_t, + IMM_INDEX: i32, + ) -> svint32_t; + } + unsafe { _svqrdmlsh_lane_s32(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Saturating rounding doubling multiply-subtract high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh_lane[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrdmlsh, IMM_INDEX = 0))] +pub fn svqrdmlsh_lane_s64( + op1: svint64_t, + op2: svint64_t, + op3: svint64_t, +) -> svint64_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdmlsh.lane.nxv2i64" + )] + fn _svqrdmlsh_lane_s64( + op1: svint64_t, + op2: svint64_t, + op3: svint64_t, + IMM_INDEX: i32, + ) -> svint64_t; + } + unsafe { _svqrdmlsh_lane_s64(op1, op2, op3, IMM_INDEX) } +} +#[doc = "Saturating rounding doubling multiply-subtract high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrdmlsh))] +pub fn svqrdmlsh_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdmlsh.nxv16i8" + )] + fn _svqrdmlsh_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t; + } + unsafe { _svqrdmlsh_s8(op1, op2, op3) } +} +#[doc = "Saturating rounding doubling multiply-subtract high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrdmlsh))] +pub fn svqrdmlsh_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { + svqrdmlsh_s8(op1, op2, svdup_n_s8(op3)) +} +#[doc = "Saturating rounding doubling multiply-subtract high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrdmlsh))] +pub fn svqrdmlsh_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdmlsh.nxv8i16" + )] + fn _svqrdmlsh_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t; + } + unsafe { _svqrdmlsh_s16(op1, op2, op3) } +} +#[doc = "Saturating rounding doubling multiply-subtract high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrdmlsh))] +pub fn svqrdmlsh_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { + svqrdmlsh_s16(op1, op2, svdup_n_s16(op3)) +} +#[doc = "Saturating rounding doubling multiply-subtract high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrdmlsh))] +pub fn svqrdmlsh_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdmlsh.nxv4i32" + )] + fn _svqrdmlsh_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _svqrdmlsh_s32(op1, op2, op3) } +} +#[doc = "Saturating rounding doubling multiply-subtract high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrdmlsh))] +pub fn svqrdmlsh_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { + svqrdmlsh_s32(op1, op2, svdup_n_s32(op3)) +} +#[doc = "Saturating rounding doubling multiply-subtract high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrdmlsh))] +pub fn svqrdmlsh_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdmlsh.nxv2i64" + )] + fn _svqrdmlsh_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + unsafe { _svqrdmlsh_s64(op1, op2, op3) } +} +#[doc = "Saturating rounding doubling multiply-subtract high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrdmlsh))] +pub fn svqrdmlsh_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { + svqrdmlsh_s64(op1, op2, svdup_n_s64(op3)) +} +#[doc = "Saturating rounding doubling multiply high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh_lane[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrdmulh, IMM_INDEX = 0))] +pub fn svqrdmulh_lane_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + static_assert_range!(IMM_INDEX, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdmulh.lane.nxv8i16" + )] + fn _svqrdmulh_lane_s16(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint16_t; + } + unsafe { _svqrdmulh_lane_s16(op1, op2, IMM_INDEX) } +} +#[doc = "Saturating rounding doubling multiply high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh_lane[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrdmulh, IMM_INDEX = 0))] +pub fn svqrdmulh_lane_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + static_assert_range!(IMM_INDEX, 0..=3); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdmulh.lane.nxv4i32" + )] + fn _svqrdmulh_lane_s32(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint32_t; + } + unsafe { _svqrdmulh_lane_s32(op1, op2, IMM_INDEX) } +} +#[doc = "Saturating rounding doubling multiply high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh_lane[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrdmulh, IMM_INDEX = 0))] +pub fn svqrdmulh_lane_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + static_assert_range!(IMM_INDEX, 0..=1); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdmulh.lane.nxv2i64" + )] + fn _svqrdmulh_lane_s64(op1: svint64_t, op2: svint64_t, imm_index: i32) -> svint64_t; + } + unsafe { _svqrdmulh_lane_s64(op1, op2, IMM_INDEX) } +} +#[doc = "Saturating rounding doubling multiply high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrdmulh))] +pub fn svqrdmulh_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdmulh.nxv16i8" + )] + fn _svqrdmulh_s8(op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqrdmulh_s8(op1, op2) } +} +#[doc = "Saturating rounding doubling multiply high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrdmulh))] +pub fn svqrdmulh_n_s8(op1: svint8_t, op2: i8) -> svint8_t { + svqrdmulh_s8(op1, svdup_n_s8(op2)) +} +#[doc = "Saturating rounding doubling multiply high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrdmulh))] +pub fn svqrdmulh_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdmulh.nxv8i16" + )] + fn _svqrdmulh_s16(op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqrdmulh_s16(op1, op2) } +} +#[doc = "Saturating rounding doubling multiply high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrdmulh))] +pub fn svqrdmulh_n_s16(op1: svint16_t, op2: i16) -> svint16_t { + svqrdmulh_s16(op1, svdup_n_s16(op2)) +} +#[doc = "Saturating rounding doubling multiply high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrdmulh))] +pub fn svqrdmulh_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdmulh.nxv4i32" + )] + fn _svqrdmulh_s32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqrdmulh_s32(op1, op2) } +} +#[doc = "Saturating rounding doubling multiply high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrdmulh))] +pub fn svqrdmulh_n_s32(op1: svint32_t, op2: i32) -> svint32_t { + svqrdmulh_s32(op1, svdup_n_s32(op2)) +} +#[doc = "Saturating rounding doubling multiply high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrdmulh))] +pub fn svqrdmulh_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrdmulh.nxv2i64" + )] + fn _svqrdmulh_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqrdmulh_s64(op1, op2) } +} +#[doc = "Saturating rounding doubling multiply high"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrdmulh))] +pub fn svqrdmulh_n_s64(op1: svint64_t, op2: i64) -> svint64_t { + svqrdmulh_s64(op1, svdup_n_s64(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqrshl.nxv16i8")] + fn _svqrshl_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqrshl_s8_m(pg, op1, op2) } +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqrshl_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svqrshl_s8_m(pg, op1, op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqrshl_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svqrshl_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqrshl_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqrshl.nxv8i16")] + fn _svqrshl_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqrshl_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqrshl_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svqrshl_s16_m(pg, op1, op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqrshl_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svqrshl_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqrshl_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqrshl.nxv4i32")] + fn _svqrshl_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqrshl_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqrshl_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svqrshl_s32_m(pg, op1, op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqrshl_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svqrshl_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqrshl_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqrshl.nxv2i64")] + fn _svqrshl_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqrshl_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqrshl_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svqrshl_s64_m(pg, op1, op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqrshl_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svqrshl_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshl))] +pub fn svqrshl_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqrshl_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_u8_m(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqrshl.nxv16i8")] + fn _svqrshl_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqrshl_u8_m(pg, op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { + svqrshl_u8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_u8_x(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { + svqrshl_u8_m(pg, op1, op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { + svqrshl_u8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_u8_z(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { + svqrshl_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { + svqrshl_u8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_u16_m(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqrshl.nxv8i16")] + fn _svqrshl_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqrshl_u16_m(pg.sve_into(), op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { + svqrshl_u16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_u16_x(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t { + svqrshl_u16_m(pg, op1, op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { + svqrshl_u16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_u16_z(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t { + svqrshl_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { + svqrshl_u16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_u32_m(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqrshl.nxv4i32")] + fn _svqrshl_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqrshl_u32_m(pg.sve_into(), op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { + svqrshl_u32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_u32_x(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t { + svqrshl_u32_m(pg, op1, op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { + svqrshl_u32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_u32_z(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t { + svqrshl_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { + svqrshl_u32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_u64_m(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqrshl.nxv2i64")] + fn _svqrshl_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqrshl_u64_m(pg.sve_into(), op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { + svqrshl_u64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_u64_x(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t { + svqrshl_u64_m(pg, op1, op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { + svqrshl_u64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_u64_z(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t { + svqrshl_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Saturating rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshl))] +pub fn svqrshl_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { + svqrshl_u64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating rounding shift right narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshrnb, IMM2 = 1))] +pub fn svqrshrnb_n_s16(op1: svint16_t) -> svint8_t { + static_assert_range!(IMM2, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrnb.nxv8i16" + )] + fn _svqrshrnb_n_s16(op1: svint16_t, imm2: i32) -> svint8_t; + } + unsafe { _svqrshrnb_n_s16(op1, IMM2) } +} +#[doc = "Saturating rounding shift right narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshrnb, IMM2 = 1))] +pub fn svqrshrnb_n_s32(op1: svint32_t) -> svint16_t { + static_assert_range!(IMM2, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrnb.nxv4i32" + )] + fn _svqrshrnb_n_s32(op1: svint32_t, imm2: i32) -> svint16_t; + } + unsafe { _svqrshrnb_n_s32(op1, IMM2) } +} +#[doc = "Saturating rounding shift right narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshrnb, IMM2 = 1))] +pub fn svqrshrnb_n_s64(op1: svint64_t) -> svint32_t { + static_assert_range!(IMM2, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrnb.nxv2i64" + )] + fn _svqrshrnb_n_s64(op1: svint64_t, imm2: i32) -> svint32_t; + } + unsafe { _svqrshrnb_n_s64(op1, IMM2) } +} +#[doc = "Saturating rounding shift right narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnb[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshrnb, IMM2 = 1))] +pub fn svqrshrnb_n_u16(op1: svuint16_t) -> svuint8_t { + static_assert_range!(IMM2, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqrshrnb.nxv8i16" + )] + fn _svqrshrnb_n_u16(op1: svint16_t, imm2: i32) -> svint8_t; + } + unsafe { _svqrshrnb_n_u16(op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Saturating rounding shift right narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshrnb, IMM2 = 1))] +pub fn svqrshrnb_n_u32(op1: svuint32_t) -> svuint16_t { + static_assert_range!(IMM2, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqrshrnb.nxv4i32" + )] + fn _svqrshrnb_n_u32(op1: svint32_t, imm2: i32) -> svint16_t; + } + unsafe { _svqrshrnb_n_u32(op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Saturating rounding shift right narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshrnb, IMM2 = 1))] +pub fn svqrshrnb_n_u64(op1: svuint64_t) -> svuint32_t { + static_assert_range!(IMM2, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqrshrnb.nxv2i64" + )] + fn _svqrshrnb_n_u64(op1: svint64_t, imm2: i32) -> svint32_t; + } + unsafe { _svqrshrnb_n_u64(op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Saturating rounding shift right narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshrnt, IMM2 = 1))] +pub fn svqrshrnt_n_s16(even: svint8_t, op1: svint16_t) -> svint8_t { + static_assert_range!(IMM2, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrnt.nxv8i16" + )] + fn _svqrshrnt_n_s16(even: svint8_t, op1: svint16_t, imm2: i32) -> svint8_t; + } + unsafe { _svqrshrnt_n_s16(even, op1, IMM2) } +} +#[doc = "Saturating rounding shift right narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshrnt, IMM2 = 1))] +pub fn svqrshrnt_n_s32(even: svint16_t, op1: svint32_t) -> svint16_t { + static_assert_range!(IMM2, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrnt.nxv4i32" + )] + fn _svqrshrnt_n_s32(even: svint16_t, op1: svint32_t, imm2: i32) -> svint16_t; + } + unsafe { _svqrshrnt_n_s32(even, op1, IMM2) } +} +#[doc = "Saturating rounding shift right narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshrnt, IMM2 = 1))] +pub fn svqrshrnt_n_s64(even: svint32_t, op1: svint64_t) -> svint32_t { + static_assert_range!(IMM2, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrnt.nxv2i64" + )] + fn _svqrshrnt_n_s64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t; + } + unsafe { _svqrshrnt_n_s64(even, op1, IMM2) } +} +#[doc = "Saturating rounding shift right narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshrnt, IMM2 = 1))] +pub fn svqrshrnt_n_u16(even: svuint8_t, op1: svuint16_t) -> svuint8_t { + static_assert_range!(IMM2, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqrshrnt.nxv8i16" + )] + fn _svqrshrnt_n_u16(even: svint8_t, op1: svint16_t, imm2: i32) -> svint8_t; + } + unsafe { _svqrshrnt_n_u16(even.as_signed(), op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Saturating rounding shift right narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshrnt, IMM2 = 1))] +pub fn svqrshrnt_n_u32(even: svuint16_t, op1: svuint32_t) -> svuint16_t { + static_assert_range!(IMM2, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqrshrnt.nxv4i32" + )] + fn _svqrshrnt_n_u32(even: svint16_t, op1: svint32_t, imm2: i32) -> svint16_t; + } + unsafe { _svqrshrnt_n_u32(even.as_signed(), op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Saturating rounding shift right narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqrshrnt, IMM2 = 1))] +pub fn svqrshrnt_n_u64(even: svuint32_t, op1: svuint64_t) -> svuint32_t { + static_assert_range!(IMM2, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqrshrnt.nxv2i64" + )] + fn _svqrshrnt_n_u64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t; + } + unsafe { _svqrshrnt_n_u64(even.as_signed(), op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Saturating rounding shift right unsigned narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrunb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshrunb, IMM2 = 1))] +pub fn svqrshrunb_n_s16(op1: svint16_t) -> svuint8_t { + static_assert_range!(IMM2, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrunb.nxv8i16" + )] + fn _svqrshrunb_n_s16(op1: svint16_t, imm2: i32) -> svint8_t; + } + unsafe { _svqrshrunb_n_s16(op1, IMM2).as_unsigned() } +} +#[doc = "Saturating rounding shift right unsigned narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrunb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshrunb, IMM2 = 1))] +pub fn svqrshrunb_n_s32(op1: svint32_t) -> svuint16_t { + static_assert_range!(IMM2, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrunb.nxv4i32" + )] + fn _svqrshrunb_n_s32(op1: svint32_t, imm2: i32) -> svint16_t; + } + unsafe { _svqrshrunb_n_s32(op1, IMM2).as_unsigned() } +} +#[doc = "Saturating rounding shift right unsigned narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrunb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshrunb, IMM2 = 1))] +pub fn svqrshrunb_n_s64(op1: svint64_t) -> svuint32_t { + static_assert_range!(IMM2, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrunb.nxv2i64" + )] + fn _svqrshrunb_n_s64(op1: svint64_t, imm2: i32) -> svint32_t; + } + unsafe { _svqrshrunb_n_s64(op1, IMM2).as_unsigned() } +} +#[doc = "Saturating rounding shift right unsigned narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrunt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshrunt, IMM2 = 1))] +pub fn svqrshrunt_n_s16(even: svuint8_t, op1: svint16_t) -> svuint8_t { + static_assert_range!(IMM2, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrunt.nxv8i16" + )] + fn _svqrshrunt_n_s16(even: svint8_t, op1: svint16_t, imm2: i32) -> svint8_t; + } + unsafe { _svqrshrunt_n_s16(even.as_signed(), op1, IMM2).as_unsigned() } +} +#[doc = "Saturating rounding shift right unsigned narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrunt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshrunt, IMM2 = 1))] +pub fn svqrshrunt_n_s32(even: svuint16_t, op1: svint32_t) -> svuint16_t { + static_assert_range!(IMM2, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrunt.nxv4i32" + )] + fn _svqrshrunt_n_s32(even: svint16_t, op1: svint32_t, imm2: i32) -> svint16_t; + } + unsafe { _svqrshrunt_n_s32(even.as_signed(), op1, IMM2).as_unsigned() } +} +#[doc = "Saturating rounding shift right unsigned narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrunt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqrshrunt, IMM2 = 1))] +pub fn svqrshrunt_n_s64(even: svuint32_t, op1: svint64_t) -> svuint32_t { + static_assert_range!(IMM2, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqrshrunt.nxv2i64" + )] + fn _svqrshrunt_n_s64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t; + } + unsafe { _svqrshrunt_n_s64(even.as_signed(), op1, IMM2).as_unsigned() } +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqshl.nxv16i8")] + fn _svqshl_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqshl_s8_m(pg, op1, op2) } +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqshl_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svqshl_s8_m(pg, op1, op2) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqshl_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svqshl_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqshl_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqshl.nxv8i16")] + fn _svqshl_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqshl_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqshl_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svqshl_s16_m(pg, op1, op2) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqshl_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svqshl_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqshl_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqshl.nxv4i32")] + fn _svqshl_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqshl_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqshl_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svqshl_s32_m(pg, op1, op2) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqshl_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svqshl_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqshl_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqshl.nxv2i64")] + fn _svqshl_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqshl_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqshl_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svqshl_s64_m(pg, op1, op2) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqshl_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svqshl_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshl))] +pub fn svqshl_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqshl_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_u8_m(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqshl.nxv16i8")] + fn _svqshl_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqshl_u8_m(pg, op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { + svqshl_u8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_u8_x(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { + svqshl_u8_m(pg, op1, op2) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { + svqshl_u8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_u8_z(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { + svqshl_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { + svqshl_u8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_u16_m(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqshl.nxv8i16")] + fn _svqshl_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqshl_u16_m(pg.sve_into(), op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { + svqshl_u16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_u16_x(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t { + svqshl_u16_m(pg, op1, op2) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { + svqshl_u16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_u16_z(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t { + svqshl_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { + svqshl_u16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_u32_m(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqshl.nxv4i32")] + fn _svqshl_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqshl_u32_m(pg.sve_into(), op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { + svqshl_u32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_u32_x(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t { + svqshl_u32_m(pg, op1, op2) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { + svqshl_u32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_u32_z(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t { + svqshl_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { + svqshl_u32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_u64_m(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqshl.nxv2i64")] + fn _svqshl_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqshl_u64_m(pg.sve_into(), op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { + svqshl_u64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_u64_x(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t { + svqshl_u64_m(pg, op1, op2) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { + svqshl_u64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_u64_z(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t { + svqshl_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Saturating shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqshl))] +pub fn svqshl_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { + svqshl_u64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))] +pub fn svqshlu_n_s8_m(pg: svbool_t, op1: svint8_t) -> svuint8_t { + static_assert_range!(IMM2, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqshlu.nxv16i8")] + fn _svqshlu_n_s8_m(pg: svbool_t, op1: svint8_t, imm2: i32) -> svint8_t; + } + unsafe { _svqshlu_n_s8_m(pg, op1, IMM2).as_unsigned() } +} +#[doc = "Saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))] +pub fn svqshlu_n_s8_x(pg: svbool_t, op1: svint8_t) -> svuint8_t { + svqshlu_n_s8_m::(pg, op1) +} +#[doc = "Saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))] +pub fn svqshlu_n_s8_z(pg: svbool_t, op1: svint8_t) -> svuint8_t { + svqshlu_n_s8_m::(pg, svsel_s8(pg, op1, svdup_n_s8(0))) +} +#[doc = "Saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))] +pub fn svqshlu_n_s16_m(pg: svbool_t, op1: svint16_t) -> svuint16_t { + static_assert_range!(IMM2, 0..=15); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqshlu.nxv8i16")] + fn _svqshlu_n_s16_m(pg: svbool8_t, op1: svint16_t, imm2: i32) -> svint16_t; + } + unsafe { _svqshlu_n_s16_m(pg.sve_into(), op1, IMM2).as_unsigned() } +} +#[doc = "Saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))] +pub fn svqshlu_n_s16_x(pg: svbool_t, op1: svint16_t) -> svuint16_t { + svqshlu_n_s16_m::(pg, op1) +} +#[doc = "Saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))] +pub fn svqshlu_n_s16_z(pg: svbool_t, op1: svint16_t) -> svuint16_t { + svqshlu_n_s16_m::(pg, svsel_s16(pg, op1, svdup_n_s16(0))) +} +#[doc = "Saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))] +pub fn svqshlu_n_s32_m(pg: svbool_t, op1: svint32_t) -> svuint32_t { + static_assert_range!(IMM2, 0..=31); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqshlu.nxv4i32")] + fn _svqshlu_n_s32_m(pg: svbool4_t, op1: svint32_t, imm2: i32) -> svint32_t; + } + unsafe { _svqshlu_n_s32_m(pg.sve_into(), op1, IMM2).as_unsigned() } +} +#[doc = "Saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))] +pub fn svqshlu_n_s32_x(pg: svbool_t, op1: svint32_t) -> svuint32_t { + svqshlu_n_s32_m::(pg, op1) +} +#[doc = "Saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))] +pub fn svqshlu_n_s32_z(pg: svbool_t, op1: svint32_t) -> svuint32_t { + svqshlu_n_s32_m::(pg, svsel_s32(pg, op1, svdup_n_s32(0))) +} +#[doc = "Saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))] +pub fn svqshlu_n_s64_m(pg: svbool_t, op1: svint64_t) -> svuint64_t { + static_assert_range!(IMM2, 0..=63); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqshlu.nxv2i64")] + fn _svqshlu_n_s64_m(pg: svbool2_t, op1: svint64_t, imm2: i32) -> svint64_t; + } + unsafe { _svqshlu_n_s64_m(pg.sve_into(), op1, IMM2).as_unsigned() } +} +#[doc = "Saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))] +pub fn svqshlu_n_s64_x(pg: svbool_t, op1: svint64_t) -> svuint64_t { + svqshlu_n_s64_m::(pg, op1) +} +#[doc = "Saturating shift left unsigned"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))] +pub fn svqshlu_n_s64_z(pg: svbool_t, op1: svint64_t) -> svuint64_t { + svqshlu_n_s64_m::(pg, svsel_s64(pg, op1, svdup_n_s64(0))) +} +#[doc = "Saturating shift right narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshrnb, IMM2 = 1))] +pub fn svqshrnb_n_s16(op1: svint16_t) -> svint8_t { + static_assert_range!(IMM2, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqshrnb.nxv8i16" + )] + fn _svqshrnb_n_s16(op1: svint16_t, imm2: i32) -> svint8_t; + } + unsafe { _svqshrnb_n_s16(op1, IMM2) } +} +#[doc = "Saturating shift right narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshrnb, IMM2 = 1))] +pub fn svqshrnb_n_s32(op1: svint32_t) -> svint16_t { + static_assert_range!(IMM2, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqshrnb.nxv4i32" + )] + fn _svqshrnb_n_s32(op1: svint32_t, imm2: i32) -> svint16_t; + } + unsafe { _svqshrnb_n_s32(op1, IMM2) } +} +#[doc = "Saturating shift right narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshrnb, IMM2 = 1))] +pub fn svqshrnb_n_s64(op1: svint64_t) -> svint32_t { + static_assert_range!(IMM2, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqshrnb.nxv2i64" + )] + fn _svqshrnb_n_s64(op1: svint64_t, imm2: i32) -> svint32_t; + } + unsafe { _svqshrnb_n_s64(op1, IMM2) } +} +#[doc = "Saturating shift right narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnb[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqshrnb, IMM2 = 1))] +pub fn svqshrnb_n_u16(op1: svuint16_t) -> svuint8_t { + static_assert_range!(IMM2, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqshrnb.nxv8i16" + )] + fn _svqshrnb_n_u16(op1: svint16_t, imm2: i32) -> svint8_t; + } + unsafe { _svqshrnb_n_u16(op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Saturating shift right narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqshrnb, IMM2 = 1))] +pub fn svqshrnb_n_u32(op1: svuint32_t) -> svuint16_t { + static_assert_range!(IMM2, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqshrnb.nxv4i32" + )] + fn _svqshrnb_n_u32(op1: svint32_t, imm2: i32) -> svint16_t; + } + unsafe { _svqshrnb_n_u32(op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Saturating shift right narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqshrnb, IMM2 = 1))] +pub fn svqshrnb_n_u64(op1: svuint64_t) -> svuint32_t { + static_assert_range!(IMM2, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqshrnb.nxv2i64" + )] + fn _svqshrnb_n_u64(op1: svint64_t, imm2: i32) -> svint32_t; + } + unsafe { _svqshrnb_n_u64(op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Saturating shift right narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshrnt, IMM2 = 1))] +pub fn svqshrnt_n_s16(even: svint8_t, op1: svint16_t) -> svint8_t { + static_assert_range!(IMM2, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqshrnt.nxv8i16" + )] + fn _svqshrnt_n_s16(even: svint8_t, op1: svint16_t, imm2: i32) -> svint8_t; + } + unsafe { _svqshrnt_n_s16(even, op1, IMM2) } +} +#[doc = "Saturating shift right narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshrnt, IMM2 = 1))] +pub fn svqshrnt_n_s32(even: svint16_t, op1: svint32_t) -> svint16_t { + static_assert_range!(IMM2, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqshrnt.nxv4i32" + )] + fn _svqshrnt_n_s32(even: svint16_t, op1: svint32_t, imm2: i32) -> svint16_t; + } + unsafe { _svqshrnt_n_s32(even, op1, IMM2) } +} +#[doc = "Saturating shift right narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshrnt, IMM2 = 1))] +pub fn svqshrnt_n_s64(even: svint32_t, op1: svint64_t) -> svint32_t { + static_assert_range!(IMM2, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqshrnt.nxv2i64" + )] + fn _svqshrnt_n_s64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t; + } + unsafe { _svqshrnt_n_s64(even, op1, IMM2) } +} +#[doc = "Saturating shift right narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqshrnt, IMM2 = 1))] +pub fn svqshrnt_n_u16(even: svuint8_t, op1: svuint16_t) -> svuint8_t { + static_assert_range!(IMM2, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqshrnt.nxv8i16" + )] + fn _svqshrnt_n_u16(even: svint8_t, op1: svint16_t, imm2: i32) -> svint8_t; + } + unsafe { _svqshrnt_n_u16(even.as_signed(), op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Saturating shift right narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqshrnt, IMM2 = 1))] +pub fn svqshrnt_n_u32(even: svuint16_t, op1: svuint32_t) -> svuint16_t { + static_assert_range!(IMM2, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqshrnt.nxv4i32" + )] + fn _svqshrnt_n_u32(even: svint16_t, op1: svint32_t, imm2: i32) -> svint16_t; + } + unsafe { _svqshrnt_n_u32(even.as_signed(), op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Saturating shift right narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqshrnt, IMM2 = 1))] +pub fn svqshrnt_n_u64(even: svuint32_t, op1: svuint64_t) -> svuint32_t { + static_assert_range!(IMM2, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uqshrnt.nxv2i64" + )] + fn _svqshrnt_n_u64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t; + } + unsafe { _svqshrnt_n_u64(even.as_signed(), op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Saturating shift right unsigned narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrunb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshrunb, IMM2 = 1))] +pub fn svqshrunb_n_s16(op1: svint16_t) -> svuint8_t { + static_assert_range!(IMM2, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqshrunb.nxv8i16" + )] + fn _svqshrunb_n_s16(op1: svint16_t, imm2: i32) -> svint8_t; + } + unsafe { _svqshrunb_n_s16(op1, IMM2).as_unsigned() } +} +#[doc = "Saturating shift right unsigned narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrunb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshrunb, IMM2 = 1))] +pub fn svqshrunb_n_s32(op1: svint32_t) -> svuint16_t { + static_assert_range!(IMM2, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqshrunb.nxv4i32" + )] + fn _svqshrunb_n_s32(op1: svint32_t, imm2: i32) -> svint16_t; + } + unsafe { _svqshrunb_n_s32(op1, IMM2).as_unsigned() } +} +#[doc = "Saturating shift right unsigned narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrunb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshrunb, IMM2 = 1))] +pub fn svqshrunb_n_s64(op1: svint64_t) -> svuint32_t { + static_assert_range!(IMM2, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqshrunb.nxv2i64" + )] + fn _svqshrunb_n_s64(op1: svint64_t, imm2: i32) -> svint32_t; + } + unsafe { _svqshrunb_n_s64(op1, IMM2).as_unsigned() } +} +#[doc = "Saturating shift right unsigned narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrunt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshrunt, IMM2 = 1))] +pub fn svqshrunt_n_s16(even: svuint8_t, op1: svint16_t) -> svuint8_t { + static_assert_range!(IMM2, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqshrunt.nxv8i16" + )] + fn _svqshrunt_n_s16(even: svint8_t, op1: svint16_t, imm2: i32) -> svint8_t; + } + unsafe { _svqshrunt_n_s16(even.as_signed(), op1, IMM2).as_unsigned() } +} +#[doc = "Saturating shift right unsigned narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrunt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshrunt, IMM2 = 1))] +pub fn svqshrunt_n_s32(even: svuint16_t, op1: svint32_t) -> svuint16_t { + static_assert_range!(IMM2, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqshrunt.nxv4i32" + )] + fn _svqshrunt_n_s32(even: svint16_t, op1: svint32_t, imm2: i32) -> svint16_t; + } + unsafe { _svqshrunt_n_s32(even.as_signed(), op1, IMM2).as_unsigned() } +} +#[doc = "Saturating shift right unsigned narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrunt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqshrunt, IMM2 = 1))] +pub fn svqshrunt_n_s64(even: svuint32_t, op1: svint64_t) -> svuint32_t { + static_assert_range!(IMM2, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqshrunt.nxv2i64" + )] + fn _svqshrunt_n_s64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t; + } + unsafe { _svqshrunt_n_s64(even.as_signed(), op1, IMM2).as_unsigned() } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsub.nxv16i8")] + fn _svqsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqsub_s8_m(pg, op1, op2) } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqsub_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svqsub_s8_m(pg, op1, op2) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqsub_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svqsub_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqsub_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsub.nxv8i16")] + fn _svqsub_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqsub_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqsub_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svqsub_s16_m(pg, op1, op2) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqsub_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svqsub_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqsub_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsub.nxv4i32")] + fn _svqsub_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqsub_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqsub_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svqsub_s32_m(pg, op1, op2) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqsub_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svqsub_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqsub_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsub.nxv2i64")] + fn _svqsub_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqsub_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqsub_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svqsub_s64_m(pg, op1, op2) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqsub_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svqsub_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsub))] +pub fn svqsub_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqsub_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsub.nxv16i8")] + fn _svqsub_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqsub_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svqsub_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svqsub_u8_m(pg, op1, op2) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svqsub_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svqsub_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svqsub_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsub.nxv8i16")] + fn _svqsub_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqsub_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svqsub_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svqsub_u16_m(pg, op1, op2) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svqsub_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svqsub_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svqsub_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsub.nxv4i32")] + fn _svqsub_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqsub_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svqsub_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svqsub_u32_m(pg, op1, op2) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svqsub_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svqsub_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svqsub_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsub.nxv2i64")] + fn _svqsub_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqsub_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svqsub_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svqsub_u64_m(pg, op1, op2) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svqsub_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svqsub_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Saturating subtract"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsub))] +pub fn svqsub_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svqsub_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsubr.nxv16i8")] + fn _svqsubr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqsubr_s8_m(pg, op1, op2) } +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqsubr_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svqsubr_s8_m(pg, op1, op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqsubr_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svqsubr_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svqsubr_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsubr.nxv8i16")] + fn _svqsubr_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqsubr_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqsubr_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svqsubr_s16_m(pg, op1, op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqsubr_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svqsubr_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svqsubr_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsubr.nxv4i32")] + fn _svqsubr_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqsubr_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqsubr_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svqsubr_s32_m(pg, op1, op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqsubr_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svqsubr_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svqsubr_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsubr.nxv2i64")] + fn _svqsubr_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqsubr_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqsubr_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svqsubr_s64_m(pg, op1, op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqsubr_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svqsubr_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqsubr))] +pub fn svqsubr_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svqsubr_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsubr.nxv16i8")] + fn _svqsubr_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svqsubr_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svqsubr_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svqsubr_u8_m(pg, op1, op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svqsubr_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svqsubr_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svqsubr_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsubr.nxv8i16")] + fn _svqsubr_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svqsubr_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svqsubr_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svqsubr_u16_m(pg, op1, op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svqsubr_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svqsubr_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svqsubr_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsubr.nxv4i32")] + fn _svqsubr_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svqsubr_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svqsubr_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svqsubr_u32_m(pg, op1, op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svqsubr_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svqsubr_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svqsubr_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsubr.nxv2i64")] + fn _svqsubr_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svqsubr_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svqsubr_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svqsubr_u64_m(pg, op1, op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svqsubr_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svqsubr_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Saturating subtract reversed"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqsubr))] +pub fn svqsubr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svqsubr_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Saturating extract narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqxtnb))] +pub fn svqxtnb_s16(op: svint16_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqxtnb.nxv8i16")] + fn _svqxtnb_s16(op: svint16_t) -> svint8_t; + } + unsafe { _svqxtnb_s16(op) } +} +#[doc = "Saturating extract narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqxtnb))] +pub fn svqxtnb_s32(op: svint32_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqxtnb.nxv4i32")] + fn _svqxtnb_s32(op: svint32_t) -> svint16_t; + } + unsafe { _svqxtnb_s32(op) } +} +#[doc = "Saturating extract narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqxtnb))] +pub fn svqxtnb_s64(op: svint64_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqxtnb.nxv2i64")] + fn _svqxtnb_s64(op: svint64_t) -> svint32_t; + } + unsafe { _svqxtnb_s64(op) } +} +#[doc = "Saturating extract narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnb[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqxtnb))] +pub fn svqxtnb_u16(op: svuint16_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqxtnb.nxv8i16")] + fn _svqxtnb_u16(op: svint16_t) -> svint8_t; + } + unsafe { _svqxtnb_u16(op.as_signed()).as_unsigned() } +} +#[doc = "Saturating extract narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqxtnb))] +pub fn svqxtnb_u32(op: svuint32_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqxtnb.nxv4i32")] + fn _svqxtnb_u32(op: svint32_t) -> svint16_t; + } + unsafe { _svqxtnb_u32(op.as_signed()).as_unsigned() } +} +#[doc = "Saturating extract narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqxtnb))] +pub fn svqxtnb_u64(op: svuint64_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqxtnb.nxv2i64")] + fn _svqxtnb_u64(op: svint64_t) -> svint32_t; + } + unsafe { _svqxtnb_u64(op.as_signed()).as_unsigned() } +} +#[doc = "Saturating extract narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqxtnt))] +pub fn svqxtnt_s16(even: svint8_t, op: svint16_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqxtnt.nxv8i16")] + fn _svqxtnt_s16(even: svint8_t, op: svint16_t) -> svint8_t; + } + unsafe { _svqxtnt_s16(even, op) } +} +#[doc = "Saturating extract narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqxtnt))] +pub fn svqxtnt_s32(even: svint16_t, op: svint32_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqxtnt.nxv4i32")] + fn _svqxtnt_s32(even: svint16_t, op: svint32_t) -> svint16_t; + } + unsafe { _svqxtnt_s32(even, op) } +} +#[doc = "Saturating extract narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqxtnt))] +pub fn svqxtnt_s64(even: svint32_t, op: svint64_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqxtnt.nxv2i64")] + fn _svqxtnt_s64(even: svint32_t, op: svint64_t) -> svint32_t; + } + unsafe { _svqxtnt_s64(even, op) } +} +#[doc = "Saturating extract narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnt[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqxtnt))] +pub fn svqxtnt_u16(even: svuint8_t, op: svuint16_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqxtnt.nxv8i16")] + fn _svqxtnt_u16(even: svint8_t, op: svint16_t) -> svint8_t; + } + unsafe { _svqxtnt_u16(even.as_signed(), op.as_signed()).as_unsigned() } +} +#[doc = "Saturating extract narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnt[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqxtnt))] +pub fn svqxtnt_u32(even: svuint16_t, op: svuint32_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqxtnt.nxv4i32")] + fn _svqxtnt_u32(even: svint16_t, op: svint32_t) -> svint16_t; + } + unsafe { _svqxtnt_u32(even.as_signed(), op.as_signed()).as_unsigned() } +} +#[doc = "Saturating extract narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uqxtnt))] +pub fn svqxtnt_u64(even: svuint32_t, op: svuint64_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqxtnt.nxv2i64")] + fn _svqxtnt_u64(even: svint32_t, op: svint64_t) -> svint32_t; + } + unsafe { _svqxtnt_u64(even.as_signed(), op.as_signed()).as_unsigned() } +} +#[doc = "Saturating extract unsigned narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtunb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqxtunb))] +pub fn svqxtunb_s16(op: svint16_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqxtunb.nxv8i16" + )] + fn _svqxtunb_s16(op: svint16_t) -> svint8_t; + } + unsafe { _svqxtunb_s16(op).as_unsigned() } +} +#[doc = "Saturating extract unsigned narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtunb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqxtunb))] +pub fn svqxtunb_s32(op: svint32_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqxtunb.nxv4i32" + )] + fn _svqxtunb_s32(op: svint32_t) -> svint16_t; + } + unsafe { _svqxtunb_s32(op).as_unsigned() } +} +#[doc = "Saturating extract unsigned narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtunb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqxtunb))] +pub fn svqxtunb_s64(op: svint64_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqxtunb.nxv2i64" + )] + fn _svqxtunb_s64(op: svint64_t) -> svint32_t; + } + unsafe { _svqxtunb_s64(op).as_unsigned() } +} +#[doc = "Saturating extract unsigned narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtunt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqxtunt))] +pub fn svqxtunt_s16(even: svuint8_t, op: svint16_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqxtunt.nxv8i16" + )] + fn _svqxtunt_s16(even: svint8_t, op: svint16_t) -> svint8_t; + } + unsafe { _svqxtunt_s16(even.as_signed(), op).as_unsigned() } +} +#[doc = "Saturating extract unsigned narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtunt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqxtunt))] +pub fn svqxtunt_s32(even: svuint16_t, op: svint32_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqxtunt.nxv4i32" + )] + fn _svqxtunt_s32(even: svint16_t, op: svint32_t) -> svint16_t; + } + unsafe { _svqxtunt_s32(even.as_signed(), op).as_unsigned() } +} +#[doc = "Saturating extract unsigned narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtunt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sqxtunt))] +pub fn svqxtunt_s64(even: svuint32_t, op: svint64_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sqxtunt.nxv2i64" + )] + fn _svqxtunt_s64(even: svint32_t, op: svint64_t) -> svint32_t; + } + unsafe { _svqxtunt_s64(even.as_signed(), op).as_unsigned() } +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.raddhnb.nxv8i16" + )] + fn _svraddhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t; + } + unsafe { _svraddhnb_s16(op1, op2) } +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_n_s16(op1: svint16_t, op2: i16) -> svint8_t { + svraddhnb_s16(op1, svdup_n_s16(op2)) +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.raddhnb.nxv4i32" + )] + fn _svraddhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t; + } + unsafe { _svraddhnb_s32(op1, op2) } +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_n_s32(op1: svint32_t, op2: i32) -> svint16_t { + svraddhnb_s32(op1, svdup_n_s32(op2)) +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.raddhnb.nxv2i64" + )] + fn _svraddhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t; + } + unsafe { _svraddhnb_s64(op1, op2) } +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_n_s64(op1: svint64_t, op2: i64) -> svint32_t { + svraddhnb_s64(op1, svdup_n_s64(op2)) +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_u16(op1: svuint16_t, op2: svuint16_t) -> svuint8_t { + unsafe { svraddhnb_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_n_u16(op1: svuint16_t, op2: u16) -> svuint8_t { + svraddhnb_u16(op1, svdup_n_u16(op2)) +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_u32(op1: svuint32_t, op2: svuint32_t) -> svuint16_t { + unsafe { svraddhnb_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_n_u32(op1: svuint32_t, op2: u32) -> svuint16_t { + svraddhnb_u32(op1, svdup_n_u32(op2)) +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_u64(op1: svuint64_t, op2: svuint64_t) -> svuint32_t { + unsafe { svraddhnb_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding add narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnb))] +pub fn svraddhnb_n_u64(op1: svuint64_t, op2: u64) -> svuint32_t { + svraddhnb_u64(op1, svdup_n_u64(op2)) +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.raddhnt.nxv8i16" + )] + fn _svraddhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t; + } + unsafe { _svraddhnt_s16(even, op1, op2) } +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_n_s16(even: svint8_t, op1: svint16_t, op2: i16) -> svint8_t { + svraddhnt_s16(even, op1, svdup_n_s16(op2)) +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.raddhnt.nxv4i32" + )] + fn _svraddhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t; + } + unsafe { _svraddhnt_s32(even, op1, op2) } +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_n_s32(even: svint16_t, op1: svint32_t, op2: i32) -> svint16_t { + svraddhnt_s32(even, op1, svdup_n_s32(op2)) +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.raddhnt.nxv2i64" + )] + fn _svraddhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_t; + } + unsafe { _svraddhnt_s64(even, op1, op2) } +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_n_s64(even: svint32_t, op1: svint64_t, op2: i64) -> svint32_t { + svraddhnt_s64(even, op1, svdup_n_s64(op2)) +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_u16(even: svuint8_t, op1: svuint16_t, op2: svuint16_t) -> svuint8_t { + unsafe { svraddhnt_s16(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_n_u16(even: svuint8_t, op1: svuint16_t, op2: u16) -> svuint8_t { + svraddhnt_u16(even, op1, svdup_n_u16(op2)) +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_u32(even: svuint16_t, op1: svuint32_t, op2: svuint32_t) -> svuint16_t { + unsafe { svraddhnt_s32(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_n_u32(even: svuint16_t, op1: svuint32_t, op2: u32) -> svuint16_t { + svraddhnt_u32(even, op1, svdup_n_u32(op2)) +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_u64(even: svuint32_t, op1: svuint64_t, op2: svuint64_t) -> svuint32_t { + unsafe { svraddhnt_s64(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding add narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(raddhnt))] +pub fn svraddhnt_n_u64(even: svuint32_t, op1: svuint64_t, op2: u64) -> svuint32_t { + svraddhnt_u64(even, op1, svdup_n_u64(op2)) +} +#[doc = "Bitwise rotate left by 1 and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrax1[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-sha3")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rax1))] +pub fn svrax1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rax1")] + fn _svrax1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svrax1_s64(op1, op2) } +} +#[doc = "Bitwise rotate left by 1 and exclusive OR"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrax1[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-sha3")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rax1))] +pub fn svrax1_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe { svrax1_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Reciprocal estimate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpe[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urecpe))] +pub fn svrecpe_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urecpe.nxv4i32")] + fn _svrecpe_u32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svrecpe_u32_m(inactive.as_signed(), pg.sve_into(), op.as_signed()).as_unsigned() } +} +#[doc = "Reciprocal estimate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpe[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urecpe))] +pub fn svrecpe_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svrecpe_u32_m(op, pg, op) +} +#[doc = "Reciprocal estimate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpe[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urecpe))] +pub fn svrecpe_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svrecpe_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srhadd.nxv16i8")] + fn _svrhadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svrhadd_s8_m(pg, op1, op2) } +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svrhadd_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svrhadd_s8_m(pg, op1, op2) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svrhadd_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svrhadd_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svrhadd_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srhadd.nxv8i16")] + fn _svrhadd_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svrhadd_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svrhadd_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svrhadd_s16_m(pg, op1, op2) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svrhadd_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svrhadd_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svrhadd_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srhadd.nxv4i32")] + fn _svrhadd_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svrhadd_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svrhadd_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svrhadd_s32_m(pg, op1, op2) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svrhadd_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svrhadd_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svrhadd_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srhadd.nxv2i64")] + fn _svrhadd_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svrhadd_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svrhadd_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svrhadd_s64_m(pg, op1, op2) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svrhadd_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svrhadd_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srhadd))] +pub fn svrhadd_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svrhadd_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urhadd.nxv16i8")] + fn _svrhadd_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svrhadd_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svrhadd_u8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svrhadd_u8_m(pg, op1, op2) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svrhadd_u8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + svrhadd_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { + svrhadd_u8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urhadd.nxv8i16")] + fn _svrhadd_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svrhadd_u16_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svrhadd_u16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svrhadd_u16_m(pg, op1, op2) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svrhadd_u16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + svrhadd_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { + svrhadd_u16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urhadd.nxv4i32")] + fn _svrhadd_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svrhadd_u32_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svrhadd_u32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svrhadd_u32_m(pg, op1, op2) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svrhadd_u32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + svrhadd_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { + svrhadd_u32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urhadd.nxv2i64")] + fn _svrhadd_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svrhadd_u64_m(pg.sve_into(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svrhadd_u64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svrhadd_u64_m(pg, op1, op2) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svrhadd_u64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + svrhadd_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Rounding halving add"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urhadd))] +pub fn svrhadd_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { + svrhadd_u64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshl.nxv16i8")] + fn _svrshl_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svrshl_s8_m(pg, op1, op2) } +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svrshl_s8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svrshl_s8_m(pg, op1, op2) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svrshl_s8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + svrshl_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { + svrshl_s8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshl.nxv8i16")] + fn _svrshl_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svrshl_s16_m(pg.sve_into(), op1, op2) } +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svrshl_s16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svrshl_s16_m(pg, op1, op2) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svrshl_s16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { + svrshl_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { + svrshl_s16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshl.nxv4i32")] + fn _svrshl_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svrshl_s32_m(pg.sve_into(), op1, op2) } +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svrshl_s32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svrshl_s32_m(pg, op1, op2) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svrshl_s32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { + svrshl_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { + svrshl_s32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshl.nxv2i64")] + fn _svrshl_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svrshl_s64_m(pg.sve_into(), op1, op2) } +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svrshl_s64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svrshl_s64_m(pg, op1, op2) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svrshl_s64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { + svrshl_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshl))] +pub fn svrshl_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { + svrshl_s64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_u8_m(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshl.nxv16i8")] + fn _svrshl_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svrshl_u8_m(pg, op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { + svrshl_u8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_u8_x(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { + svrshl_u8_m(pg, op1, op2) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { + svrshl_u8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_u8_z(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { + svrshl_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { + svrshl_u8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_u16_m(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshl.nxv8i16")] + fn _svrshl_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svrshl_u16_m(pg.sve_into(), op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { + svrshl_u16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_u16_x(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t { + svrshl_u16_m(pg, op1, op2) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { + svrshl_u16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_u16_z(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t { + svrshl_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { + svrshl_u16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_u32_m(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshl.nxv4i32")] + fn _svrshl_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svrshl_u32_m(pg.sve_into(), op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { + svrshl_u32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_u32_x(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t { + svrshl_u32_m(pg, op1, op2) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { + svrshl_u32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_u32_z(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t { + svrshl_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { + svrshl_u32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_u64_m(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshl.nxv2i64")] + fn _svrshl_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svrshl_u64_m(pg.sve_into(), op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { + svrshl_u64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_u64_x(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t { + svrshl_u64_m(pg, op1, op2) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { + svrshl_u64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_u64_z(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t { + svrshl_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Rounding shift left"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshl))] +pub fn svrshl_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { + svrshl_u64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))] +pub fn svrshr_n_s8_m(pg: svbool_t, op1: svint8_t) -> svint8_t { + static_assert_range!(IMM2, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshr.nxv16i8")] + fn _svrshr_n_s8_m(pg: svbool_t, op1: svint8_t, imm2: i32) -> svint8_t; + } + unsafe { _svrshr_n_s8_m(pg, op1, IMM2) } +} +#[doc = "Rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))] +pub fn svrshr_n_s8_x(pg: svbool_t, op1: svint8_t) -> svint8_t { + svrshr_n_s8_m::(pg, op1) +} +#[doc = "Rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))] +pub fn svrshr_n_s8_z(pg: svbool_t, op1: svint8_t) -> svint8_t { + svrshr_n_s8_m::(pg, svsel_s8(pg, op1, svdup_n_s8(0))) +} +#[doc = "Rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))] +pub fn svrshr_n_s16_m(pg: svbool_t, op1: svint16_t) -> svint16_t { + static_assert_range!(IMM2, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshr.nxv8i16")] + fn _svrshr_n_s16_m(pg: svbool8_t, op1: svint16_t, imm2: i32) -> svint16_t; + } + unsafe { _svrshr_n_s16_m(pg.sve_into(), op1, IMM2) } +} +#[doc = "Rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))] +pub fn svrshr_n_s16_x(pg: svbool_t, op1: svint16_t) -> svint16_t { + svrshr_n_s16_m::(pg, op1) +} +#[doc = "Rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))] +pub fn svrshr_n_s16_z(pg: svbool_t, op1: svint16_t) -> svint16_t { + svrshr_n_s16_m::(pg, svsel_s16(pg, op1, svdup_n_s16(0))) +} +#[doc = "Rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))] +pub fn svrshr_n_s32_m(pg: svbool_t, op1: svint32_t) -> svint32_t { + static_assert_range!(IMM2, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshr.nxv4i32")] + fn _svrshr_n_s32_m(pg: svbool4_t, op1: svint32_t, imm2: i32) -> svint32_t; + } + unsafe { _svrshr_n_s32_m(pg.sve_into(), op1, IMM2) } +} +#[doc = "Rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))] +pub fn svrshr_n_s32_x(pg: svbool_t, op1: svint32_t) -> svint32_t { + svrshr_n_s32_m::(pg, op1) +} +#[doc = "Rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))] +pub fn svrshr_n_s32_z(pg: svbool_t, op1: svint32_t) -> svint32_t { + svrshr_n_s32_m::(pg, svsel_s32(pg, op1, svdup_n_s32(0))) +} +#[doc = "Rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))] +pub fn svrshr_n_s64_m(pg: svbool_t, op1: svint64_t) -> svint64_t { + static_assert_range!(IMM2, 1..=64); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshr.nxv2i64")] + fn _svrshr_n_s64_m(pg: svbool2_t, op1: svint64_t, imm2: i32) -> svint64_t; + } + unsafe { _svrshr_n_s64_m(pg.sve_into(), op1, IMM2) } +} +#[doc = "Rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))] +pub fn svrshr_n_s64_x(pg: svbool_t, op1: svint64_t) -> svint64_t { + svrshr_n_s64_m::(pg, op1) +} +#[doc = "Rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))] +pub fn svrshr_n_s64_z(pg: svbool_t, op1: svint64_t) -> svint64_t { + svrshr_n_s64_m::(pg, svsel_s64(pg, op1, svdup_n_s64(0))) +} +#[doc = "Rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))] +pub fn svrshr_n_u8_m(pg: svbool_t, op1: svuint8_t) -> svuint8_t { + static_assert_range!(IMM2, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshr.nxv16i8")] + fn _svrshr_n_u8_m(pg: svbool_t, op1: svint8_t, imm2: i32) -> svint8_t; + } + unsafe { _svrshr_n_u8_m(pg, op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))] +pub fn svrshr_n_u8_x(pg: svbool_t, op1: svuint8_t) -> svuint8_t { + svrshr_n_u8_m::(pg, op1) +} +#[doc = "Rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))] +pub fn svrshr_n_u8_z(pg: svbool_t, op1: svuint8_t) -> svuint8_t { + svrshr_n_u8_m::(pg, svsel_u8(pg, op1, svdup_n_u8(0))) +} +#[doc = "Rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))] +pub fn svrshr_n_u16_m(pg: svbool_t, op1: svuint16_t) -> svuint16_t { + static_assert_range!(IMM2, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshr.nxv8i16")] + fn _svrshr_n_u16_m(pg: svbool8_t, op1: svint16_t, imm2: i32) -> svint16_t; + } + unsafe { _svrshr_n_u16_m(pg.sve_into(), op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))] +pub fn svrshr_n_u16_x(pg: svbool_t, op1: svuint16_t) -> svuint16_t { + svrshr_n_u16_m::(pg, op1) +} +#[doc = "Rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))] +pub fn svrshr_n_u16_z(pg: svbool_t, op1: svuint16_t) -> svuint16_t { + svrshr_n_u16_m::(pg, svsel_u16(pg, op1, svdup_n_u16(0))) +} +#[doc = "Rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))] +pub fn svrshr_n_u32_m(pg: svbool_t, op1: svuint32_t) -> svuint32_t { + static_assert_range!(IMM2, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshr.nxv4i32")] + fn _svrshr_n_u32_m(pg: svbool4_t, op1: svint32_t, imm2: i32) -> svint32_t; + } + unsafe { _svrshr_n_u32_m(pg.sve_into(), op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))] +pub fn svrshr_n_u32_x(pg: svbool_t, op1: svuint32_t) -> svuint32_t { + svrshr_n_u32_m::(pg, op1) +} +#[doc = "Rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))] +pub fn svrshr_n_u32_z(pg: svbool_t, op1: svuint32_t) -> svuint32_t { + svrshr_n_u32_m::(pg, svsel_u32(pg, op1, svdup_n_u32(0))) +} +#[doc = "Rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))] +pub fn svrshr_n_u64_m(pg: svbool_t, op1: svuint64_t) -> svuint64_t { + static_assert_range!(IMM2, 1..=64); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshr.nxv2i64")] + fn _svrshr_n_u64_m(pg: svbool2_t, op1: svint64_t, imm2: i32) -> svint64_t; + } + unsafe { _svrshr_n_u64_m(pg.sve_into(), op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))] +pub fn svrshr_n_u64_x(pg: svbool_t, op1: svuint64_t) -> svuint64_t { + svrshr_n_u64_m::(pg, op1) +} +#[doc = "Rounding shift right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))] +pub fn svrshr_n_u64_z(pg: svbool_t, op1: svuint64_t) -> svuint64_t { + svrshr_n_u64_m::(pg, svsel_u64(pg, op1, svdup_n_u64(0))) +} +#[doc = "Rounding shift right narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rshrnb, IMM2 = 1))] +pub fn svrshrnb_n_s16(op1: svint16_t) -> svint8_t { + static_assert_range!(IMM2, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rshrnb.nxv8i16")] + fn _svrshrnb_n_s16(op1: svint16_t, imm2: i32) -> svint8_t; + } + unsafe { _svrshrnb_n_s16(op1, IMM2) } +} +#[doc = "Rounding shift right narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rshrnb, IMM2 = 1))] +pub fn svrshrnb_n_s32(op1: svint32_t) -> svint16_t { + static_assert_range!(IMM2, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rshrnb.nxv4i32")] + fn _svrshrnb_n_s32(op1: svint32_t, imm2: i32) -> svint16_t; + } + unsafe { _svrshrnb_n_s32(op1, IMM2) } +} +#[doc = "Rounding shift right narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rshrnb, IMM2 = 1))] +pub fn svrshrnb_n_s64(op1: svint64_t) -> svint32_t { + static_assert_range!(IMM2, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rshrnb.nxv2i64")] + fn _svrshrnb_n_s64(op1: svint64_t, imm2: i32) -> svint32_t; + } + unsafe { _svrshrnb_n_s64(op1, IMM2) } +} +#[doc = "Rounding shift right narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnb[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rshrnb, IMM2 = 1))] +pub fn svrshrnb_n_u16(op1: svuint16_t) -> svuint8_t { + static_assert_range!(IMM2, 1..=8); + unsafe { svrshrnb_n_s16::(op1.as_signed()).as_unsigned() } +} +#[doc = "Rounding shift right narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rshrnb, IMM2 = 1))] +pub fn svrshrnb_n_u32(op1: svuint32_t) -> svuint16_t { + static_assert_range!(IMM2, 1..=16); + unsafe { svrshrnb_n_s32::(op1.as_signed()).as_unsigned() } +} +#[doc = "Rounding shift right narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rshrnb, IMM2 = 1))] +pub fn svrshrnb_n_u64(op1: svuint64_t) -> svuint32_t { + static_assert_range!(IMM2, 1..=32); + unsafe { svrshrnb_n_s64::(op1.as_signed()).as_unsigned() } +} +#[doc = "Rounding shift right narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rshrnt, IMM2 = 1))] +pub fn svrshrnt_n_s16(even: svint8_t, op1: svint16_t) -> svint8_t { + static_assert_range!(IMM2, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rshrnt.nxv8i16")] + fn _svrshrnt_n_s16(even: svint8_t, op1: svint16_t, imm2: i32) -> svint8_t; + } + unsafe { _svrshrnt_n_s16(even, op1, IMM2) } +} +#[doc = "Rounding shift right narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rshrnt, IMM2 = 1))] +pub fn svrshrnt_n_s32(even: svint16_t, op1: svint32_t) -> svint16_t { + static_assert_range!(IMM2, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rshrnt.nxv4i32")] + fn _svrshrnt_n_s32(even: svint16_t, op1: svint32_t, imm2: i32) -> svint16_t; + } + unsafe { _svrshrnt_n_s32(even, op1, IMM2) } +} +#[doc = "Rounding shift right narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rshrnt, IMM2 = 1))] +pub fn svrshrnt_n_s64(even: svint32_t, op1: svint64_t) -> svint32_t { + static_assert_range!(IMM2, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rshrnt.nxv2i64")] + fn _svrshrnt_n_s64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t; + } + unsafe { _svrshrnt_n_s64(even, op1, IMM2) } +} +#[doc = "Rounding shift right narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rshrnt, IMM2 = 1))] +pub fn svrshrnt_n_u16(even: svuint8_t, op1: svuint16_t) -> svuint8_t { + static_assert_range!(IMM2, 1..=8); + unsafe { svrshrnt_n_s16::(even.as_signed(), op1.as_signed()).as_unsigned() } +} +#[doc = "Rounding shift right narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rshrnt, IMM2 = 1))] +pub fn svrshrnt_n_u32(even: svuint16_t, op1: svuint32_t) -> svuint16_t { + static_assert_range!(IMM2, 1..=16); + unsafe { svrshrnt_n_s32::(even.as_signed(), op1.as_signed()).as_unsigned() } +} +#[doc = "Rounding shift right narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rshrnt, IMM2 = 1))] +pub fn svrshrnt_n_u64(even: svuint32_t, op1: svuint64_t) -> svuint32_t { + static_assert_range!(IMM2, 1..=32); + unsafe { svrshrnt_n_s64::(even.as_signed(), op1.as_signed()).as_unsigned() } +} +#[doc = "Reciprocal square root estimate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrte[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ursqrte))] +pub fn svrsqrte_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ursqrte.nxv4i32" + )] + fn _svrsqrte_u32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t; + } + unsafe { _svrsqrte_u32_m(inactive.as_signed(), pg.sve_into(), op.as_signed()).as_unsigned() } +} +#[doc = "Reciprocal square root estimate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrte[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ursqrte))] +pub fn svrsqrte_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svrsqrte_u32_m(op, pg, op) +} +#[doc = "Reciprocal square root estimate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrte[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ursqrte))] +pub fn svrsqrte_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { + svrsqrte_u32_m(svdup_n_u32(0), pg, op) +} +#[doc = "Rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srsra, IMM3 = 1))] +pub fn svrsra_n_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + static_assert_range!(IMM3, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srsra.nxv16i8")] + fn _svrsra_n_s8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t; + } + unsafe { _svrsra_n_s8(op1, op2, IMM3) } +} +#[doc = "Rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srsra, IMM3 = 1))] +pub fn svrsra_n_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + static_assert_range!(IMM3, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srsra.nxv8i16")] + fn _svrsra_n_s16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t; + } + unsafe { _svrsra_n_s16(op1, op2, IMM3) } +} +#[doc = "Rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srsra, IMM3 = 1))] +pub fn svrsra_n_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + static_assert_range!(IMM3, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srsra.nxv4i32")] + fn _svrsra_n_s32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t; + } + unsafe { _svrsra_n_s32(op1, op2, IMM3) } +} +#[doc = "Rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(srsra, IMM3 = 1))] +pub fn svrsra_n_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + static_assert_range!(IMM3, 1..=64); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srsra.nxv2i64")] + fn _svrsra_n_s64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t; + } + unsafe { _svrsra_n_s64(op1, op2, IMM3) } +} +#[doc = "Rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ursra, IMM3 = 1))] +pub fn svrsra_n_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + static_assert_range!(IMM3, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ursra.nxv16i8")] + fn _svrsra_n_u8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t; + } + unsafe { _svrsra_n_u8(op1.as_signed(), op2.as_signed(), IMM3).as_unsigned() } +} +#[doc = "Rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ursra, IMM3 = 1))] +pub fn svrsra_n_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + static_assert_range!(IMM3, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ursra.nxv8i16")] + fn _svrsra_n_u16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t; + } + unsafe { _svrsra_n_u16(op1.as_signed(), op2.as_signed(), IMM3).as_unsigned() } +} +#[doc = "Rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ursra, IMM3 = 1))] +pub fn svrsra_n_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + static_assert_range!(IMM3, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ursra.nxv4i32")] + fn _svrsra_n_u32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t; + } + unsafe { _svrsra_n_u32(op1.as_signed(), op2.as_signed(), IMM3).as_unsigned() } +} +#[doc = "Rounding shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ursra, IMM3 = 1))] +pub fn svrsra_n_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + static_assert_range!(IMM3, 1..=64); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ursra.nxv2i64")] + fn _svrsra_n_u64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t; + } + unsafe { _svrsra_n_u64(op1.as_signed(), op2.as_signed(), IMM3).as_unsigned() } +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.rsubhnb.nxv8i16" + )] + fn _svrsubhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t; + } + unsafe { _svrsubhnb_s16(op1, op2) } +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_n_s16(op1: svint16_t, op2: i16) -> svint8_t { + svrsubhnb_s16(op1, svdup_n_s16(op2)) +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.rsubhnb.nxv4i32" + )] + fn _svrsubhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t; + } + unsafe { _svrsubhnb_s32(op1, op2) } +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_n_s32(op1: svint32_t, op2: i32) -> svint16_t { + svrsubhnb_s32(op1, svdup_n_s32(op2)) +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.rsubhnb.nxv2i64" + )] + fn _svrsubhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t; + } + unsafe { _svrsubhnb_s64(op1, op2) } +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_n_s64(op1: svint64_t, op2: i64) -> svint32_t { + svrsubhnb_s64(op1, svdup_n_s64(op2)) +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_u16(op1: svuint16_t, op2: svuint16_t) -> svuint8_t { + unsafe { svrsubhnb_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_n_u16(op1: svuint16_t, op2: u16) -> svuint8_t { + svrsubhnb_u16(op1, svdup_n_u16(op2)) +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_u32(op1: svuint32_t, op2: svuint32_t) -> svuint16_t { + unsafe { svrsubhnb_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_n_u32(op1: svuint32_t, op2: u32) -> svuint16_t { + svrsubhnb_u32(op1, svdup_n_u32(op2)) +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_u64(op1: svuint64_t, op2: svuint64_t) -> svuint32_t { + unsafe { svrsubhnb_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnb))] +pub fn svrsubhnb_n_u64(op1: svuint64_t, op2: u64) -> svuint32_t { + svrsubhnb_u64(op1, svdup_n_u64(op2)) +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.rsubhnt.nxv8i16" + )] + fn _svrsubhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t; + } + unsafe { _svrsubhnt_s16(even, op1, op2) } +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_n_s16(even: svint8_t, op1: svint16_t, op2: i16) -> svint8_t { + svrsubhnt_s16(even, op1, svdup_n_s16(op2)) +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.rsubhnt.nxv4i32" + )] + fn _svrsubhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t; + } + unsafe { _svrsubhnt_s32(even, op1, op2) } +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_n_s32(even: svint16_t, op1: svint32_t, op2: i32) -> svint16_t { + svrsubhnt_s32(even, op1, svdup_n_s32(op2)) +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.rsubhnt.nxv2i64" + )] + fn _svrsubhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_t; + } + unsafe { _svrsubhnt_s64(even, op1, op2) } +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_n_s64(even: svint32_t, op1: svint64_t, op2: i64) -> svint32_t { + svrsubhnt_s64(even, op1, svdup_n_s64(op2)) +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_u16(even: svuint8_t, op1: svuint16_t, op2: svuint16_t) -> svuint8_t { + unsafe { svrsubhnt_s16(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_n_u16(even: svuint8_t, op1: svuint16_t, op2: u16) -> svuint8_t { + svrsubhnt_u16(even, op1, svdup_n_u16(op2)) +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_u32(even: svuint16_t, op1: svuint32_t, op2: svuint32_t) -> svuint16_t { + unsafe { svrsubhnt_s32(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_n_u32(even: svuint16_t, op1: svuint32_t, op2: u32) -> svuint16_t { + svrsubhnt_u32(even, op1, svdup_n_u32(op2)) +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_u64(even: svuint32_t, op1: svuint64_t, op2: svuint64_t) -> svuint32_t { + unsafe { svrsubhnt_s64(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Rounding subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(rsubhnt))] +pub fn svrsubhnt_n_u64(even: svuint32_t, op1: svuint64_t, op2: u64) -> svuint32_t { + svrsubhnt_u64(even, op1, svdup_n_u64(op2)) +} +#[doc = "Subtract with borrow long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sbclb))] +pub fn svsbclb_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sbclb.nxv4i32")] + fn _svsbclb_u32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _svsbclb_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Subtract with borrow long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sbclb))] +pub fn svsbclb_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svsbclb_u32(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Subtract with borrow long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sbclb))] +pub fn svsbclb_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sbclb.nxv2i64")] + fn _svsbclb_u64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + unsafe { _svsbclb_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Subtract with borrow long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sbclb))] +pub fn svsbclb_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svsbclb_u64(op1, op2, svdup_n_u64(op3)) +} +#[doc = "Subtract with borrow long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclt[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sbclt))] +pub fn svsbclt_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sbclt.nxv4i32")] + fn _svsbclt_u32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t; + } + unsafe { _svsbclt_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Subtract with borrow long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sbclt))] +pub fn svsbclt_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { + svsbclt_u32(op1, op2, svdup_n_u32(op3)) +} +#[doc = "Subtract with borrow long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sbclt))] +pub fn svsbclt_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sbclt.nxv2i64")] + fn _svsbclt_u64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t; + } + unsafe { _svsbclt_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() } +} +#[doc = "Subtract with borrow long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sbclt))] +pub fn svsbclt_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { + svsbclt_u64(op1, op2, svdup_n_u64(op3)) +} +#[doc = "Shift left long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sshllb, IMM2 = 0))] +pub fn svshllb_n_s16(op1: svint8_t) -> svint16_t { + static_assert_range!(IMM2, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sshllb.nxv8i16")] + fn _svshllb_n_s16(op1: svint8_t, imm2: i32) -> svint16_t; + } + unsafe { _svshllb_n_s16(op1, IMM2) } +} +#[doc = "Shift left long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sshllb, IMM2 = 0))] +pub fn svshllb_n_s32(op1: svint16_t) -> svint32_t { + static_assert_range!(IMM2, 0..=15); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sshllb.nxv4i32")] + fn _svshllb_n_s32(op1: svint16_t, imm2: i32) -> svint32_t; + } + unsafe { _svshllb_n_s32(op1, IMM2) } +} +#[doc = "Shift left long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sshllb, IMM2 = 0))] +pub fn svshllb_n_s64(op1: svint32_t) -> svint64_t { + static_assert_range!(IMM2, 0..=31); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sshllb.nxv2i64")] + fn _svshllb_n_s64(op1: svint32_t, imm2: i32) -> svint64_t; + } + unsafe { _svshllb_n_s64(op1, IMM2) } +} +#[doc = "Shift left long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllb[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ushllb, IMM2 = 0))] +pub fn svshllb_n_u16(op1: svuint8_t) -> svuint16_t { + static_assert_range!(IMM2, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ushllb.nxv8i16")] + fn _svshllb_n_u16(op1: svint8_t, imm2: i32) -> svint16_t; + } + unsafe { _svshllb_n_u16(op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Shift left long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ushllb, IMM2 = 0))] +pub fn svshllb_n_u32(op1: svuint16_t) -> svuint32_t { + static_assert_range!(IMM2, 0..=15); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ushllb.nxv4i32")] + fn _svshllb_n_u32(op1: svint16_t, imm2: i32) -> svint32_t; + } + unsafe { _svshllb_n_u32(op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Shift left long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ushllb, IMM2 = 0))] +pub fn svshllb_n_u64(op1: svuint32_t) -> svuint64_t { + static_assert_range!(IMM2, 0..=31); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ushllb.nxv2i64")] + fn _svshllb_n_u64(op1: svint32_t, imm2: i32) -> svint64_t; + } + unsafe { _svshllb_n_u64(op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Shift left long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sshllt, IMM2 = 0))] +pub fn svshllt_n_s16(op1: svint8_t) -> svint16_t { + static_assert_range!(IMM2, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sshllt.nxv8i16")] + fn _svshllt_n_s16(op1: svint8_t, imm2: i32) -> svint16_t; + } + unsafe { _svshllt_n_s16(op1, IMM2) } +} +#[doc = "Shift left long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sshllt, IMM2 = 0))] +pub fn svshllt_n_s32(op1: svint16_t) -> svint32_t { + static_assert_range!(IMM2, 0..=15); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sshllt.nxv4i32")] + fn _svshllt_n_s32(op1: svint16_t, imm2: i32) -> svint32_t; + } + unsafe { _svshllt_n_s32(op1, IMM2) } +} +#[doc = "Shift left long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sshllt, IMM2 = 0))] +pub fn svshllt_n_s64(op1: svint32_t) -> svint64_t { + static_assert_range!(IMM2, 0..=31); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sshllt.nxv2i64")] + fn _svshllt_n_s64(op1: svint32_t, imm2: i32) -> svint64_t; + } + unsafe { _svshllt_n_s64(op1, IMM2) } +} +#[doc = "Shift left long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ushllt, IMM2 = 0))] +pub fn svshllt_n_u16(op1: svuint8_t) -> svuint16_t { + static_assert_range!(IMM2, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ushllt.nxv8i16")] + fn _svshllt_n_u16(op1: svint8_t, imm2: i32) -> svint16_t; + } + unsafe { _svshllt_n_u16(op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Shift left long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ushllt, IMM2 = 0))] +pub fn svshllt_n_u32(op1: svuint16_t) -> svuint32_t { + static_assert_range!(IMM2, 0..=15); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ushllt.nxv4i32")] + fn _svshllt_n_u32(op1: svint16_t, imm2: i32) -> svint32_t; + } + unsafe { _svshllt_n_u32(op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Shift left long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ushllt, IMM2 = 0))] +pub fn svshllt_n_u64(op1: svuint32_t) -> svuint64_t { + static_assert_range!(IMM2, 0..=31); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ushllt.nxv2i64")] + fn _svshllt_n_u64(op1: svint32_t, imm2: i32) -> svint64_t; + } + unsafe { _svshllt_n_u64(op1.as_signed(), IMM2).as_unsigned() } +} +#[doc = "Shift right narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shrnb, IMM2 = 1))] +pub fn svshrnb_n_s16(op1: svint16_t) -> svint8_t { + static_assert_range!(IMM2, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shrnb.nxv8i16")] + fn _svshrnb_n_s16(op1: svint16_t, imm2: i32) -> svint8_t; + } + unsafe { _svshrnb_n_s16(op1, IMM2) } +} +#[doc = "Shift right narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shrnb, IMM2 = 1))] +pub fn svshrnb_n_s32(op1: svint32_t) -> svint16_t { + static_assert_range!(IMM2, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shrnb.nxv4i32")] + fn _svshrnb_n_s32(op1: svint32_t, imm2: i32) -> svint16_t; + } + unsafe { _svshrnb_n_s32(op1, IMM2) } +} +#[doc = "Shift right narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shrnb, IMM2 = 1))] +pub fn svshrnb_n_s64(op1: svint64_t) -> svint32_t { + static_assert_range!(IMM2, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shrnb.nxv2i64")] + fn _svshrnb_n_s64(op1: svint64_t, imm2: i32) -> svint32_t; + } + unsafe { _svshrnb_n_s64(op1, IMM2) } +} +#[doc = "Shift right narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnb[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shrnb, IMM2 = 1))] +pub fn svshrnb_n_u16(op1: svuint16_t) -> svuint8_t { + static_assert_range!(IMM2, 1..=8); + unsafe { svshrnb_n_s16::(op1.as_signed()).as_unsigned() } +} +#[doc = "Shift right narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shrnb, IMM2 = 1))] +pub fn svshrnb_n_u32(op1: svuint32_t) -> svuint16_t { + static_assert_range!(IMM2, 1..=16); + unsafe { svshrnb_n_s32::(op1.as_signed()).as_unsigned() } +} +#[doc = "Shift right narrow (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shrnb, IMM2 = 1))] +pub fn svshrnb_n_u64(op1: svuint64_t) -> svuint32_t { + static_assert_range!(IMM2, 1..=32); + unsafe { svshrnb_n_s64::(op1.as_signed()).as_unsigned() } +} +#[doc = "Shift right narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shrnt, IMM2 = 1))] +pub fn svshrnt_n_s16(even: svint8_t, op1: svint16_t) -> svint8_t { + static_assert_range!(IMM2, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shrnt.nxv8i16")] + fn _svshrnt_n_s16(even: svint8_t, op1: svint16_t, imm2: i32) -> svint8_t; + } + unsafe { _svshrnt_n_s16(even, op1, IMM2) } +} +#[doc = "Shift right narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shrnt, IMM2 = 1))] +pub fn svshrnt_n_s32(even: svint16_t, op1: svint32_t) -> svint16_t { + static_assert_range!(IMM2, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shrnt.nxv4i32")] + fn _svshrnt_n_s32(even: svint16_t, op1: svint32_t, imm2: i32) -> svint16_t; + } + unsafe { _svshrnt_n_s32(even, op1, IMM2) } +} +#[doc = "Shift right narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shrnt, IMM2 = 1))] +pub fn svshrnt_n_s64(even: svint32_t, op1: svint64_t) -> svint32_t { + static_assert_range!(IMM2, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shrnt.nxv2i64")] + fn _svshrnt_n_s64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t; + } + unsafe { _svshrnt_n_s64(even, op1, IMM2) } +} +#[doc = "Shift right narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shrnt, IMM2 = 1))] +pub fn svshrnt_n_u16(even: svuint8_t, op1: svuint16_t) -> svuint8_t { + static_assert_range!(IMM2, 1..=8); + unsafe { svshrnt_n_s16::(even.as_signed(), op1.as_signed()).as_unsigned() } +} +#[doc = "Shift right narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shrnt, IMM2 = 1))] +pub fn svshrnt_n_u32(even: svuint16_t, op1: svuint32_t) -> svuint16_t { + static_assert_range!(IMM2, 1..=16); + unsafe { svshrnt_n_s32::(even.as_signed(), op1.as_signed()).as_unsigned() } +} +#[doc = "Shift right narrow (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(shrnt, IMM2 = 1))] +pub fn svshrnt_n_u64(even: svuint32_t, op1: svuint64_t) -> svuint32_t { + static_assert_range!(IMM2, 1..=32); + unsafe { svshrnt_n_s64::(even.as_signed(), op1.as_signed()).as_unsigned() } +} +#[doc = "Shift left and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sli, IMM3 = 0))] +pub fn svsli_n_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + static_assert_range!(IMM3, 0..=7); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sli.nxv16i8")] + fn _svsli_n_s8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t; + } + unsafe { _svsli_n_s8(op1, op2, IMM3) } +} +#[doc = "Shift left and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sli, IMM3 = 0))] +pub fn svsli_n_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + static_assert_range!(IMM3, 0..=15); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sli.nxv8i16")] + fn _svsli_n_s16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t; + } + unsafe { _svsli_n_s16(op1, op2, IMM3) } +} +#[doc = "Shift left and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sli, IMM3 = 0))] +pub fn svsli_n_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + static_assert_range!(IMM3, 0..=31); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sli.nxv4i32")] + fn _svsli_n_s32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t; + } + unsafe { _svsli_n_s32(op1, op2, IMM3) } +} +#[doc = "Shift left and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sli, IMM3 = 0))] +pub fn svsli_n_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + static_assert_range!(IMM3, 0..=63); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sli.nxv2i64")] + fn _svsli_n_s64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t; + } + unsafe { _svsli_n_s64(op1, op2, IMM3) } +} +#[doc = "Shift left and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sli, IMM3 = 0))] +pub fn svsli_n_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + static_assert_range!(IMM3, 0..=7); + unsafe { svsli_n_s8::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Shift left and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sli, IMM3 = 0))] +pub fn svsli_n_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + static_assert_range!(IMM3, 0..=15); + unsafe { svsli_n_s16::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Shift left and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sli, IMM3 = 0))] +pub fn svsli_n_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + static_assert_range!(IMM3, 0..=31); + unsafe { svsli_n_s32::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Shift left and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sli, IMM3 = 0))] +pub fn svsli_n_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + static_assert_range!(IMM3, 0..=63); + unsafe { svsli_n_s64::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "SM4 encryption and decryption"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsm4e[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-sm4")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sm4e))] +pub fn svsm4e_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sm4e")] + fn _svsm4e_u32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svsm4e_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "SM4 key updates"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsm4ekey[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2,sve2-sm4")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sm4ekey))] +pub fn svsm4ekey_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sm4ekey")] + fn _svsm4ekey_u32(op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svsm4ekey_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usqadd.nxv16i8")] + fn _svsqadd_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svsqadd_u8_m(pg, op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { + svsqadd_u8_m(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u8_x(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { + svsqadd_u8_m(pg, op1, op2) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { + svsqadd_u8_x(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u8_z(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { + svsqadd_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { + svsqadd_u8_z(pg, op1, svdup_n_s8(op2)) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u16_m(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usqadd.nxv8i16")] + fn _svsqadd_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svsqadd_u16_m(pg.sve_into(), op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { + svsqadd_u16_m(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u16_x(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t { + svsqadd_u16_m(pg, op1, op2) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { + svsqadd_u16_x(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u16_z(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t { + svsqadd_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { + svsqadd_u16_z(pg, op1, svdup_n_s16(op2)) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u32_m(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usqadd.nxv4i32")] + fn _svsqadd_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svsqadd_u32_m(pg.sve_into(), op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { + svsqadd_u32_m(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u32_x(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t { + svsqadd_u32_m(pg, op1, op2) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { + svsqadd_u32_x(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u32_z(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t { + svsqadd_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { + svsqadd_u32_z(pg, op1, svdup_n_s32(op2)) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u64_m(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usqadd.nxv2i64")] + fn _svsqadd_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svsqadd_u64_m(pg.sve_into(), op1.as_signed(), op2).as_unsigned() } +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { + svsqadd_u64_m(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u64_x(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t { + svsqadd_u64_m(pg, op1, op2) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { + svsqadd_u64_x(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_u64_z(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t { + svsqadd_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2) +} +#[doc = "Saturating add with signed addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usqadd))] +pub fn svsqadd_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { + svsqadd_u64_z(pg, op1, svdup_n_s64(op2)) +} +#[doc = "Shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssra, IMM3 = 1))] +pub fn svsra_n_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + static_assert_range!(IMM3, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssra.nxv16i8")] + fn _svsra_n_s8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t; + } + unsafe { _svsra_n_s8(op1, op2, IMM3) } +} +#[doc = "Shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssra, IMM3 = 1))] +pub fn svsra_n_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + static_assert_range!(IMM3, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssra.nxv8i16")] + fn _svsra_n_s16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t; + } + unsafe { _svsra_n_s16(op1, op2, IMM3) } +} +#[doc = "Shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssra, IMM3 = 1))] +pub fn svsra_n_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + static_assert_range!(IMM3, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssra.nxv4i32")] + fn _svsra_n_s32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t; + } + unsafe { _svsra_n_s32(op1, op2, IMM3) } +} +#[doc = "Shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssra, IMM3 = 1))] +pub fn svsra_n_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + static_assert_range!(IMM3, 1..=64); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssra.nxv2i64")] + fn _svsra_n_s64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t; + } + unsafe { _svsra_n_s64(op1, op2, IMM3) } +} +#[doc = "Shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usra, IMM3 = 1))] +pub fn svsra_n_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + static_assert_range!(IMM3, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usra.nxv16i8")] + fn _svsra_n_u8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t; + } + unsafe { _svsra_n_u8(op1.as_signed(), op2.as_signed(), IMM3).as_unsigned() } +} +#[doc = "Shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usra, IMM3 = 1))] +pub fn svsra_n_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + static_assert_range!(IMM3, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usra.nxv8i16")] + fn _svsra_n_u16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t; + } + unsafe { _svsra_n_u16(op1.as_signed(), op2.as_signed(), IMM3).as_unsigned() } +} +#[doc = "Shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usra, IMM3 = 1))] +pub fn svsra_n_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + static_assert_range!(IMM3, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usra.nxv4i32")] + fn _svsra_n_u32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t; + } + unsafe { _svsra_n_u32(op1.as_signed(), op2.as_signed(), IMM3).as_unsigned() } +} +#[doc = "Shift right and accumulate"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usra, IMM3 = 1))] +pub fn svsra_n_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + static_assert_range!(IMM3, 1..=64); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usra.nxv2i64")] + fn _svsra_n_u64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t; + } + unsafe { _svsra_n_u64(op1.as_signed(), op2.as_signed(), IMM3).as_unsigned() } +} +#[doc = "Shift right and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sri, IMM3 = 1))] +pub fn svsri_n_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + static_assert_range!(IMM3, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sri.nxv16i8")] + fn _svsri_n_s8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t; + } + unsafe { _svsri_n_s8(op1, op2, IMM3) } +} +#[doc = "Shift right and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sri, IMM3 = 1))] +pub fn svsri_n_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + static_assert_range!(IMM3, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sri.nxv8i16")] + fn _svsri_n_s16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t; + } + unsafe { _svsri_n_s16(op1, op2, IMM3) } +} +#[doc = "Shift right and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sri, IMM3 = 1))] +pub fn svsri_n_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + static_assert_range!(IMM3, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sri.nxv4i32")] + fn _svsri_n_s32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t; + } + unsafe { _svsri_n_s32(op1, op2, IMM3) } +} +#[doc = "Shift right and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sri, IMM3 = 1))] +pub fn svsri_n_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + static_assert_range!(IMM3, 1..=64); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sri.nxv2i64")] + fn _svsri_n_s64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t; + } + unsafe { _svsri_n_s64(op1, op2, IMM3) } +} +#[doc = "Shift right and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sri, IMM3 = 1))] +pub fn svsri_n_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + static_assert_range!(IMM3, 1..=8); + unsafe { svsri_n_s8::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Shift right and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sri, IMM3 = 1))] +pub fn svsri_n_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + static_assert_range!(IMM3, 1..=16); + unsafe { svsri_n_s16::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Shift right and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sri, IMM3 = 1))] +pub fn svsri_n_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + static_assert_range!(IMM3, 1..=32); + unsafe { svsri_n_s32::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Shift right and insert"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sri, IMM3 = 1))] +pub fn svsri_n_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + static_assert_range!(IMM3, 1..=64); + unsafe { svsri_n_s64::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[s64]index[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_s64index_f64( + pg: svbool_t, + base: *mut f64, + indices: svint64_t, + data: svfloat64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.index.nxv2f64" + )] + fn _svstnt1_scatter_s64index_f64( + data: svfloat64_t, + pg: svbool2_t, + base: *mut f64, + indices: svint64_t, + ); + } + _svstnt1_scatter_s64index_f64(data, pg.sve_into(), base, indices) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[s64]index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_s64index_s64( + pg: svbool_t, + base: *mut i64, + indices: svint64_t, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.index.nxv2i64" + )] + fn _svstnt1_scatter_s64index_s64( + data: svint64_t, + pg: svbool2_t, + base: *mut i64, + indices: svint64_t, + ); + } + _svstnt1_scatter_s64index_s64(data, pg.sve_into(), base, indices) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[s64]index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_s64index_u64( + pg: svbool_t, + base: *mut u64, + indices: svint64_t, + data: svuint64_t, +) { + svstnt1_scatter_s64index_s64(pg, base.as_signed(), indices, data.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u64]index[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64index_f64( + pg: svbool_t, + base: *mut f64, + indices: svuint64_t, + data: svfloat64_t, +) { + svstnt1_scatter_s64index_f64(pg, base, indices.as_signed(), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u64]index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64index_s64( + pg: svbool_t, + base: *mut i64, + indices: svuint64_t, + data: svint64_t, +) { + svstnt1_scatter_s64index_s64(pg, base, indices.as_signed(), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u64]index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64index_u64( + pg: svbool_t, + base: *mut u64, + indices: svuint64_t, + data: svuint64_t, +) { + svstnt1_scatter_s64index_s64(pg, base.as_signed(), indices.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[s64]offset[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_s64offset_f64( + pg: svbool_t, + base: *mut f64, + offsets: svint64_t, + data: svfloat64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.nxv2f64" + )] + fn _svstnt1_scatter_s64offset_f64( + data: svfloat64_t, + pg: svbool2_t, + base: *mut f64, + offsets: svint64_t, + ); + } + _svstnt1_scatter_s64offset_f64(data, pg.sve_into(), base, offsets) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[s64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_s64offset_s64( + pg: svbool_t, + base: *mut i64, + offsets: svint64_t, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.nxv2i64" + )] + fn _svstnt1_scatter_s64offset_s64( + data: svint64_t, + pg: svbool2_t, + base: *mut i64, + offsets: svint64_t, + ); + } + _svstnt1_scatter_s64offset_s64(data, pg.sve_into(), base, offsets) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[s64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_s64offset_u64( + pg: svbool_t, + base: *mut u64, + offsets: svint64_t, + data: svuint64_t, +) { + svstnt1_scatter_s64offset_s64(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u32]offset[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32offset_f32( + pg: svbool_t, + base: *mut f32, + offsets: svuint32_t, + data: svfloat32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4f32" + )] + fn _svstnt1_scatter_u32offset_f32( + data: svfloat32_t, + pg: svbool4_t, + base: *mut f32, + offsets: svint32_t, + ); + } + _svstnt1_scatter_u32offset_f32(data, pg.sve_into(), base, offsets.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u32]offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32offset_s32( + pg: svbool_t, + base: *mut i32, + offsets: svuint32_t, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4i32" + )] + fn _svstnt1_scatter_u32offset_s32( + data: svint32_t, + pg: svbool4_t, + base: *mut i32, + offsets: svint32_t, + ); + } + _svstnt1_scatter_u32offset_s32(data, pg.sve_into(), base, offsets.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u32]offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32offset_u32( + pg: svbool_t, + base: *mut u32, + offsets: svuint32_t, + data: svuint32_t, +) { + svstnt1_scatter_u32offset_s32(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u64]offset[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64offset_f64( + pg: svbool_t, + base: *mut f64, + offsets: svuint64_t, + data: svfloat64_t, +) { + svstnt1_scatter_s64offset_f64(pg, base, offsets.as_signed(), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64offset_s64( + pg: svbool_t, + base: *mut i64, + offsets: svuint64_t, + data: svint64_t, +) { + svstnt1_scatter_s64offset_s64(pg, base, offsets.as_signed(), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64offset_u64( + pg: svbool_t, + base: *mut u64, + offsets: svuint64_t, + data: svuint64_t, +) { + svstnt1_scatter_s64offset_s64(pg, base.as_signed(), offsets.as_signed(), data.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32base_f32(pg: svbool_t, bases: svuint32_t, data: svfloat32_t) { + svstnt1_scatter_u32base_offset_f32(pg, bases, 0, data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32base_s32(pg: svbool_t, bases: svuint32_t, data: svint32_t) { + svstnt1_scatter_u32base_offset_s32(pg, bases, 0, data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32base_u32(pg: svbool_t, bases: svuint32_t, data: svuint32_t) { + svstnt1_scatter_u32base_offset_u32(pg, bases, 0, data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64base_f64(pg: svbool_t, bases: svuint64_t, data: svfloat64_t) { + svstnt1_scatter_u64base_offset_f64(pg, bases, 0, data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: svint64_t) { + svstnt1_scatter_u64base_offset_s64(pg, bases, 0, data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: svuint64_t) { + svstnt1_scatter_u64base_offset_u64(pg, bases, 0, data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base]_index[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32base_index_f32( + pg: svbool_t, + bases: svuint32_t, + index: i64, + data: svfloat32_t, +) { + svstnt1_scatter_u32base_offset_f32(pg, bases, index.unchecked_shl(2), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base]_index[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, + data: svint32_t, +) { + svstnt1_scatter_u32base_offset_s32(pg, bases, index.unchecked_shl(2), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base]_index[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, + data: svuint32_t, +) { + svstnt1_scatter_u32base_offset_u32(pg, bases, index.unchecked_shl(2), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base]_index[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64base_index_f64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svfloat64_t, +) { + svstnt1_scatter_u64base_offset_f64(pg, bases, index.unchecked_shl(3), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base]_index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svint64_t, +) { + svstnt1_scatter_u64base_offset_s64(pg, bases, index.unchecked_shl(3), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base]_index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svuint64_t, +) { + svstnt1_scatter_u64base_offset_u64(pg, bases, index.unchecked_shl(3), data) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base]_offset[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32base_offset_f32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svfloat32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv4f32.nxv4i32" + )] + fn _svstnt1_scatter_u32base_offset_f32( + data: svfloat32_t, + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ); + } + _svstnt1_scatter_u32base_offset_f32(data, pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base]_offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv4i32.nxv4i32" + )] + fn _svstnt1_scatter_u32base_offset_s32( + data: svint32_t, + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ); + } + _svstnt1_scatter_u32base_offset_s32(data, pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base]_offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1_scatter_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svuint32_t, +) { + svstnt1_scatter_u32base_offset_s32(pg, bases, offset, data.as_signed()) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base]_offset[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64base_offset_f64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svfloat64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv2f64.nxv2i64" + )] + fn _svstnt1_scatter_u64base_offset_f64( + data: svfloat64_t, + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ); + } + _svstnt1_scatter_u64base_offset_f64(data, pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base]_offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv2i64.nxv2i64" + )] + fn _svstnt1_scatter_u64base_offset_s64( + data: svint64_t, + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ); + } + _svstnt1_scatter_u64base_offset_s64(data, pg.sve_into(), bases.as_signed(), offset) +} +#[doc = "Non-truncating store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base]_offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1d))] +pub unsafe fn svstnt1_scatter_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svuint64_t, +) { + svstnt1_scatter_u64base_offset_s64(pg, bases, offset, data.as_signed()) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter_[s64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_s64offset_s64( + pg: svbool_t, + base: *mut i8, + offsets: svint64_t, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.nxv2i8" + )] + fn _svstnt1b_scatter_s64offset_s64( + data: nxv2i8, + pg: svbool2_t, + base: *mut i8, + offsets: svint64_t, + ); + } + _svstnt1b_scatter_s64offset_s64( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + base, + offsets, + ) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[s64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_s64offset_s64( + pg: svbool_t, + base: *mut i16, + offsets: svint64_t, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.nxv2i16" + )] + fn _svstnt1h_scatter_s64offset_s64( + data: nxv2i16, + pg: svbool2_t, + base: *mut i16, + offsets: svint64_t, + ); + } + _svstnt1h_scatter_s64offset_s64( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + base, + offsets, + ) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[s64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_s64offset_s64( + pg: svbool_t, + base: *mut i32, + offsets: svint64_t, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.nxv2i32" + )] + fn _svstnt1w_scatter_s64offset_s64( + data: nxv2i32, + pg: svbool2_t, + base: *mut i32, + offsets: svint64_t, + ); + } + _svstnt1w_scatter_s64offset_s64( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + base, + offsets, + ) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter_[s64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_s64offset_u64( + pg: svbool_t, + base: *mut u8, + offsets: svint64_t, + data: svuint64_t, +) { + svstnt1b_scatter_s64offset_s64(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[s64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_s64offset_u64( + pg: svbool_t, + base: *mut u16, + offsets: svint64_t, + data: svuint64_t, +) { + svstnt1h_scatter_s64offset_s64(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[s64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_s64offset_u64( + pg: svbool_t, + base: *mut u32, + offsets: svint64_t, + data: svuint64_t, +) { + svstnt1w_scatter_s64offset_s64(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter_[u32]offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_u32offset_s32( + pg: svbool_t, + base: *mut i8, + offsets: svuint32_t, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4i8" + )] + fn _svstnt1b_scatter_u32offset_s32( + data: nxv4i8, + pg: svbool4_t, + base: *mut i8, + offsets: svint32_t, + ); + } + _svstnt1b_scatter_u32offset_s32( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + base, + offsets.as_signed(), + ) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[u32]offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u32offset_s32( + pg: svbool_t, + base: *mut i16, + offsets: svuint32_t, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4i16" + )] + fn _svstnt1h_scatter_u32offset_s32( + data: nxv4i16, + pg: svbool4_t, + base: *mut i16, + offsets: svint32_t, + ); + } + _svstnt1h_scatter_u32offset_s32( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + base, + offsets.as_signed(), + ) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter_[u32]offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_u32offset_u32( + pg: svbool_t, + base: *mut u8, + offsets: svuint32_t, + data: svuint32_t, +) { + svstnt1b_scatter_u32offset_s32(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[u32]offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u32offset_u32( + pg: svbool_t, + base: *mut u16, + offsets: svuint32_t, + data: svuint32_t, +) { + svstnt1h_scatter_u32offset_s32(pg, base.as_signed(), offsets, data.as_signed()) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter_[u64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_u64offset_s64( + pg: svbool_t, + base: *mut i8, + offsets: svuint64_t, + data: svint64_t, +) { + svstnt1b_scatter_s64offset_s64(pg, base, offsets.as_signed(), data) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[u64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u64offset_s64( + pg: svbool_t, + base: *mut i16, + offsets: svuint64_t, + data: svint64_t, +) { + svstnt1h_scatter_s64offset_s64(pg, base, offsets.as_signed(), data) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[u64]offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_u64offset_s64( + pg: svbool_t, + base: *mut i32, + offsets: svuint64_t, + data: svint64_t, +) { + svstnt1w_scatter_s64offset_s64(pg, base, offsets.as_signed(), data) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter_[u64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_u64offset_u64( + pg: svbool_t, + base: *mut u8, + offsets: svuint64_t, + data: svuint64_t, +) { + svstnt1b_scatter_s64offset_s64(pg, base.as_signed(), offsets.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[u64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u64offset_u64( + pg: svbool_t, + base: *mut u16, + offsets: svuint64_t, + data: svuint64_t, +) { + svstnt1h_scatter_s64offset_s64(pg, base.as_signed(), offsets.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[u64]offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_u64offset_u64( + pg: svbool_t, + base: *mut u32, + offsets: svuint64_t, + data: svuint64_t, +) { + svstnt1w_scatter_s64offset_s64(pg, base.as_signed(), offsets.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u32base]_offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv4i8.nxv4i32" + )] + fn _svstnt1b_scatter_u32base_offset_s32( + data: nxv4i8, + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ); + } + _svstnt1b_scatter_u32base_offset_s32( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + bases.as_signed(), + offset, + ) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u32base]_offset[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u32base_offset_s32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svint32_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv4i16.nxv4i32" + )] + fn _svstnt1h_scatter_u32base_offset_s32( + data: nxv4i16, + pg: svbool4_t, + bases: svint32_t, + offset: i64, + ); + } + _svstnt1h_scatter_u32base_offset_s32( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + bases.as_signed(), + offset, + ) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u32base]_offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svuint32_t, +) { + svstnt1b_scatter_u32base_offset_s32(pg, bases, offset, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u32base]_offset[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u32base_offset_u32( + pg: svbool_t, + bases: svuint32_t, + offset: i64, + data: svuint32_t, +) { + svstnt1h_scatter_u32base_offset_s32(pg, bases, offset, data.as_signed()) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u64base]_offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv2i8.nxv2i64" + )] + fn _svstnt1b_scatter_u64base_offset_s64( + data: nxv2i8, + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ); + } + _svstnt1b_scatter_u64base_offset_s64( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + bases.as_signed(), + offset, + ) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u64base]_offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv2i16.nxv2i64" + )] + fn _svstnt1h_scatter_u64base_offset_s64( + data: nxv2i16, + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ); + } + _svstnt1h_scatter_u64base_offset_s64( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + bases.as_signed(), + offset, + ) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter[_u64base]_offset[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_u64base_offset_s64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv2i32.nxv2i64" + )] + fn _svstnt1w_scatter_u64base_offset_s64( + data: nxv2i32, + pg: svbool2_t, + bases: svint64_t, + offset: i64, + ); + } + _svstnt1w_scatter_u64base_offset_s64( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + bases.as_signed(), + offset, + ) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u64base]_offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svuint64_t, +) { + svstnt1b_scatter_u64base_offset_s64(pg, bases, offset, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u64base]_offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svuint64_t, +) { + svstnt1h_scatter_u64base_offset_s64(pg, bases, offset, data.as_signed()) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter[_u64base]_offset[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_u64base_offset_u64( + pg: svbool_t, + bases: svuint64_t, + offset: i64, + data: svuint64_t, +) { + svstnt1w_scatter_u64base_offset_s64(pg, bases, offset, data.as_signed()) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u32base_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_u32base_s32(pg: svbool_t, bases: svuint32_t, data: svint32_t) { + svstnt1b_scatter_u32base_offset_s32(pg, bases, 0, data) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u32base_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u32base_s32(pg: svbool_t, bases: svuint32_t, data: svint32_t) { + svstnt1h_scatter_u32base_offset_s32(pg, bases, 0, data) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u32base_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_u32base_u32(pg: svbool_t, bases: svuint32_t, data: svuint32_t) { + svstnt1b_scatter_u32base_offset_u32(pg, bases, 0, data) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u32base_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u32base_u32(pg: svbool_t, bases: svuint32_t, data: svuint32_t) { + svstnt1h_scatter_u32base_offset_u32(pg, bases, 0, data) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u64base_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: svint64_t) { + svstnt1b_scatter_u64base_offset_s64(pg, bases, 0, data) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u64base_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: svint64_t) { + svstnt1h_scatter_u64base_offset_s64(pg, bases, 0, data) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter[_u64base_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: svint64_t) { + svstnt1w_scatter_u64base_offset_s64(pg, bases, 0, data) +} +#[doc = "Truncate to 8 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u64base_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1b))] +pub unsafe fn svstnt1b_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: svuint64_t) { + svstnt1b_scatter_u64base_offset_u64(pg, bases, 0, data) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u64base_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: svuint64_t) { + svstnt1h_scatter_u64base_offset_u64(pg, bases, 0, data) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter[_u64base_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: svuint64_t) { + svstnt1w_scatter_u64base_offset_u64(pg, bases, 0, data) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[s64]index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_s64index_s64( + pg: svbool_t, + base: *mut i16, + indices: svint64_t, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.index.nxv2i16" + )] + fn _svstnt1h_scatter_s64index_s64( + data: nxv2i16, + pg: svbool2_t, + base: *mut i16, + indices: svint64_t, + ); + } + _svstnt1h_scatter_s64index_s64( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + base, + indices, + ) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[s64]index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_s64index_s64( + pg: svbool_t, + base: *mut i32, + indices: svint64_t, + data: svint64_t, +) { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.stnt1.scatter.index.nxv2i32" + )] + fn _svstnt1w_scatter_s64index_s64( + data: nxv2i32, + pg: svbool2_t, + base: *mut i32, + indices: svint64_t, + ); + } + _svstnt1w_scatter_s64index_s64( + crate::intrinsics::simd::simd_cast(data), + pg.sve_into(), + base, + indices, + ) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[s64]index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_s64index_u64( + pg: svbool_t, + base: *mut u16, + indices: svint64_t, + data: svuint64_t, +) { + svstnt1h_scatter_s64index_s64(pg, base.as_signed(), indices, data.as_signed()) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[s64]index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_s64index_u64( + pg: svbool_t, + base: *mut u32, + indices: svint64_t, + data: svuint64_t, +) { + svstnt1w_scatter_s64index_s64(pg, base.as_signed(), indices, data.as_signed()) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[u64]index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u64index_s64( + pg: svbool_t, + base: *mut i16, + indices: svuint64_t, + data: svint64_t, +) { + svstnt1h_scatter_s64index_s64(pg, base, indices.as_signed(), data) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[u64]index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_u64index_s64( + pg: svbool_t, + base: *mut i32, + indices: svuint64_t, + data: svint64_t, +) { + svstnt1w_scatter_s64index_s64(pg, base, indices.as_signed(), data) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[u64]index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u64index_u64( + pg: svbool_t, + base: *mut u16, + indices: svuint64_t, + data: svuint64_t, +) { + svstnt1h_scatter_s64index_s64(pg, base.as_signed(), indices.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[u64]index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_u64index_u64( + pg: svbool_t, + base: *mut u32, + indices: svuint64_t, + data: svuint64_t, +) { + svstnt1w_scatter_s64index_s64(pg, base.as_signed(), indices.as_signed(), data.as_signed()) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u32base]_index[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u32base_index_s32( + pg: svbool_t, + bases: svuint32_t, + index: i64, + data: svint32_t, +) { + svstnt1h_scatter_u32base_offset_s32(pg, bases, index.unchecked_shl(1), data) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u32base]_index[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u32base_index_u32( + pg: svbool_t, + bases: svuint32_t, + index: i64, + data: svuint32_t, +) { + svstnt1h_scatter_u32base_offset_u32(pg, bases, index.unchecked_shl(1), data) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u64base]_index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svint64_t, +) { + svstnt1h_scatter_u64base_offset_s64(pg, bases, index.unchecked_shl(1), data) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter[_u64base]_index[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_u64base_index_s64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svint64_t, +) { + svstnt1w_scatter_u64base_offset_s64(pg, bases, index.unchecked_shl(2), data) +} +#[doc = "Truncate to 16 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u64base]_index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1h))] +pub unsafe fn svstnt1h_scatter_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svuint64_t, +) { + svstnt1h_scatter_u64base_offset_u64(pg, bases, index.unchecked_shl(1), data) +} +#[doc = "Truncate to 32 bits and store, non-temporal"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter[_u64base]_index[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] +#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] +#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] +#[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(stnt1w))] +pub unsafe fn svstnt1w_scatter_u64base_index_u64( + pg: svbool_t, + bases: svuint64_t, + index: i64, + data: svuint64_t, +) { + svstnt1w_scatter_u64base_offset_u64(pg, bases, index.unchecked_shl(2), data) +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subhnb.nxv8i16")] + fn _svsubhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t; + } + unsafe { _svsubhnb_s16(op1, op2) } +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_n_s16(op1: svint16_t, op2: i16) -> svint8_t { + svsubhnb_s16(op1, svdup_n_s16(op2)) +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subhnb.nxv4i32")] + fn _svsubhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t; + } + unsafe { _svsubhnb_s32(op1, op2) } +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_n_s32(op1: svint32_t, op2: i32) -> svint16_t { + svsubhnb_s32(op1, svdup_n_s32(op2)) +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subhnb.nxv2i64")] + fn _svsubhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t; + } + unsafe { _svsubhnb_s64(op1, op2) } +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_n_s64(op1: svint64_t, op2: i64) -> svint32_t { + svsubhnb_s64(op1, svdup_n_s64(op2)) +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_u16(op1: svuint16_t, op2: svuint16_t) -> svuint8_t { + unsafe { svsubhnb_s16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_n_u16(op1: svuint16_t, op2: u16) -> svuint8_t { + svsubhnb_u16(op1, svdup_n_u16(op2)) +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_u32(op1: svuint32_t, op2: svuint32_t) -> svuint16_t { + unsafe { svsubhnb_s32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_n_u32(op1: svuint32_t, op2: u32) -> svuint16_t { + svsubhnb_u32(op1, svdup_n_u32(op2)) +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_u64(op1: svuint64_t, op2: svuint64_t) -> svuint32_t { + unsafe { svsubhnb_s64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract narrow high part (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnb))] +pub fn svsubhnb_n_u64(op1: svuint64_t, op2: u64) -> svuint32_t { + svsubhnb_u64(op1, svdup_n_u64(op2)) +} +#[doc = "Subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subhnt.nxv8i16")] + fn _svsubhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t; + } + unsafe { _svsubhnt_s16(even, op1, op2) } +} +#[doc = "Subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_n_s16(even: svint8_t, op1: svint16_t, op2: i16) -> svint8_t { + svsubhnt_s16(even, op1, svdup_n_s16(op2)) +} +#[doc = "Subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subhnt.nxv4i32")] + fn _svsubhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t; + } + unsafe { _svsubhnt_s32(even, op1, op2) } +} +#[doc = "Subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_n_s32(even: svint16_t, op1: svint32_t, op2: i32) -> svint16_t { + svsubhnt_s32(even, op1, svdup_n_s32(op2)) +} +#[doc = "Subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subhnt.nxv2i64")] + fn _svsubhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_t; + } + unsafe { _svsubhnt_s64(even, op1, op2) } +} +#[doc = "Subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_n_s64(even: svint32_t, op1: svint64_t, op2: i64) -> svint32_t { + svsubhnt_s64(even, op1, svdup_n_s64(op2)) +} +#[doc = "Subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_u16(even: svuint8_t, op1: svuint16_t, op2: svuint16_t) -> svuint8_t { + unsafe { svsubhnt_s16(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_n_u16(even: svuint8_t, op1: svuint16_t, op2: u16) -> svuint8_t { + svsubhnt_u16(even, op1, svdup_n_u16(op2)) +} +#[doc = "Subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_u32(even: svuint16_t, op1: svuint32_t, op2: svuint32_t) -> svuint16_t { + unsafe { svsubhnt_s32(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_n_u32(even: svuint16_t, op1: svuint32_t, op2: u32) -> svuint16_t { + svsubhnt_u32(even, op1, svdup_n_u32(op2)) +} +#[doc = "Subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_u64(even: svuint32_t, op1: svuint64_t, op2: svuint64_t) -> svuint32_t { + unsafe { svsubhnt_s64(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract narrow high part (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(subhnt))] +pub fn svsubhnt_n_u64(even: svuint32_t, op1: svuint64_t, op2: u64) -> svuint32_t { + svsubhnt_u64(even, op1, svdup_n_u64(op2)) +} +#[doc = "Subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssublb))] +pub fn svsublb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssublb.nxv8i16")] + fn _svsublb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svsublb_s16(op1, op2) } +} +#[doc = "Subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssublb))] +pub fn svsublb_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svsublb_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssublb))] +pub fn svsublb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssublb.nxv4i32")] + fn _svsublb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svsublb_s32(op1, op2) } +} +#[doc = "Subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssublb))] +pub fn svsublb_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svsublb_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssublb))] +pub fn svsublb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssublb.nxv2i64")] + fn _svsublb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svsublb_s64(op1, op2) } +} +#[doc = "Subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssublb))] +pub fn svsublb_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svsublb_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usublb))] +pub fn svsublb_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usublb.nxv8i16")] + fn _svsublb_u16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svsublb_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usublb))] +pub fn svsublb_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { + svsublb_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usublb))] +pub fn svsublb_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usublb.nxv4i32")] + fn _svsublb_u32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svsublb_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usublb))] +pub fn svsublb_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t { + svsublb_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usublb))] +pub fn svsublb_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usublb.nxv2i64")] + fn _svsublb_u64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svsublb_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract long (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usublb))] +pub fn svsublb_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { + svsublb_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Subtract long (bottom - top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublbt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssublbt))] +pub fn svsublbt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ssublbt.nxv8i16" + )] + fn _svsublbt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svsublbt_s16(op1, op2) } +} +#[doc = "Subtract long (bottom - top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublbt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssublbt))] +pub fn svsublbt_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svsublbt_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Subtract long (bottom - top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublbt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssublbt))] +pub fn svsublbt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ssublbt.nxv4i32" + )] + fn _svsublbt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svsublbt_s32(op1, op2) } +} +#[doc = "Subtract long (bottom - top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublbt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssublbt))] +pub fn svsublbt_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svsublbt_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Subtract long (bottom - top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublbt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssublbt))] +pub fn svsublbt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ssublbt.nxv2i64" + )] + fn _svsublbt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svsublbt_s64(op1, op2) } +} +#[doc = "Subtract long (bottom - top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublbt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssublbt))] +pub fn svsublbt_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svsublbt_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssublt))] +pub fn svsublt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssublt.nxv8i16")] + fn _svsublt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svsublt_s16(op1, op2) } +} +#[doc = "Subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssublt))] +pub fn svsublt_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svsublt_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssublt))] +pub fn svsublt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssublt.nxv4i32")] + fn _svsublt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svsublt_s32(op1, op2) } +} +#[doc = "Subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssublt))] +pub fn svsublt_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svsublt_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssublt))] +pub fn svsublt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssublt.nxv2i64")] + fn _svsublt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svsublt_s64(op1, op2) } +} +#[doc = "Subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssublt))] +pub fn svsublt_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svsublt_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usublt))] +pub fn svsublt_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usublt.nxv8i16")] + fn _svsublt_u16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svsublt_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usublt))] +pub fn svsublt_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { + svsublt_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usublt))] +pub fn svsublt_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usublt.nxv4i32")] + fn _svsublt_u32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svsublt_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usublt))] +pub fn svsublt_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t { + svsublt_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usublt))] +pub fn svsublt_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usublt.nxv2i64")] + fn _svsublt_u64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svsublt_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract long (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usublt))] +pub fn svsublt_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { + svsublt_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Subtract long (top - bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubltb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssubltb))] +pub fn svsubltb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ssubltb.nxv8i16" + )] + fn _svsubltb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svsubltb_s16(op1, op2) } +} +#[doc = "Subtract long (top - bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubltb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssubltb))] +pub fn svsubltb_n_s16(op1: svint8_t, op2: i8) -> svint16_t { + svsubltb_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Subtract long (top - bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubltb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssubltb))] +pub fn svsubltb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ssubltb.nxv4i32" + )] + fn _svsubltb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svsubltb_s32(op1, op2) } +} +#[doc = "Subtract long (top - bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubltb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssubltb))] +pub fn svsubltb_n_s32(op1: svint16_t, op2: i16) -> svint32_t { + svsubltb_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Subtract long (top - bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubltb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssubltb))] +pub fn svsubltb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.ssubltb.nxv2i64" + )] + fn _svsubltb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svsubltb_s64(op1, op2) } +} +#[doc = "Subtract long (top - bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubltb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssubltb))] +pub fn svsubltb_n_s64(op1: svint32_t, op2: i32) -> svint64_t { + svsubltb_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Subtract wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssubwb))] +pub fn svsubwb_s16(op1: svint16_t, op2: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssubwb.nxv8i16")] + fn _svsubwb_s16(op1: svint16_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svsubwb_s16(op1, op2) } +} +#[doc = "Subtract wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssubwb))] +pub fn svsubwb_n_s16(op1: svint16_t, op2: i8) -> svint16_t { + svsubwb_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Subtract wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssubwb))] +pub fn svsubwb_s32(op1: svint32_t, op2: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssubwb.nxv4i32")] + fn _svsubwb_s32(op1: svint32_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svsubwb_s32(op1, op2) } +} +#[doc = "Subtract wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssubwb))] +pub fn svsubwb_n_s32(op1: svint32_t, op2: i16) -> svint32_t { + svsubwb_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Subtract wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssubwb))] +pub fn svsubwb_s64(op1: svint64_t, op2: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssubwb.nxv2i64")] + fn _svsubwb_s64(op1: svint64_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svsubwb_s64(op1, op2) } +} +#[doc = "Subtract wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssubwb))] +pub fn svsubwb_n_s64(op1: svint64_t, op2: i32) -> svint64_t { + svsubwb_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Subtract wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usubwb))] +pub fn svsubwb_u16(op1: svuint16_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usubwb.nxv8i16")] + fn _svsubwb_u16(op1: svint16_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svsubwb_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usubwb))] +pub fn svsubwb_n_u16(op1: svuint16_t, op2: u8) -> svuint16_t { + svsubwb_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Subtract wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usubwb))] +pub fn svsubwb_u32(op1: svuint32_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usubwb.nxv4i32")] + fn _svsubwb_u32(op1: svint32_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svsubwb_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usubwb))] +pub fn svsubwb_n_u32(op1: svuint32_t, op2: u16) -> svuint32_t { + svsubwb_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Subtract wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usubwb))] +pub fn svsubwb_u64(op1: svuint64_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usubwb.nxv2i64")] + fn _svsubwb_u64(op1: svint64_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svsubwb_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract wide (bottom)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usubwb))] +pub fn svsubwb_n_u64(op1: svuint64_t, op2: u32) -> svuint64_t { + svsubwb_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Subtract wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssubwt))] +pub fn svsubwt_s16(op1: svint16_t, op2: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssubwt.nxv8i16")] + fn _svsubwt_s16(op1: svint16_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svsubwt_s16(op1, op2) } +} +#[doc = "Subtract wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssubwt))] +pub fn svsubwt_n_s16(op1: svint16_t, op2: i8) -> svint16_t { + svsubwt_s16(op1, svdup_n_s8(op2)) +} +#[doc = "Subtract wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssubwt))] +pub fn svsubwt_s32(op1: svint32_t, op2: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssubwt.nxv4i32")] + fn _svsubwt_s32(op1: svint32_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svsubwt_s32(op1, op2) } +} +#[doc = "Subtract wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssubwt))] +pub fn svsubwt_n_s32(op1: svint32_t, op2: i16) -> svint32_t { + svsubwt_s32(op1, svdup_n_s16(op2)) +} +#[doc = "Subtract wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssubwt))] +pub fn svsubwt_s64(op1: svint64_t, op2: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssubwt.nxv2i64")] + fn _svsubwt_s64(op1: svint64_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svsubwt_s64(op1, op2) } +} +#[doc = "Subtract wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(ssubwt))] +pub fn svsubwt_n_s64(op1: svint64_t, op2: i32) -> svint64_t { + svsubwt_s64(op1, svdup_n_s32(op2)) +} +#[doc = "Subtract wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usubwt))] +pub fn svsubwt_u16(op1: svuint16_t, op2: svuint8_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usubwt.nxv8i16")] + fn _svsubwt_u16(op1: svint16_t, op2: svint8_t) -> svint16_t; + } + unsafe { _svsubwt_u16(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usubwt))] +pub fn svsubwt_n_u16(op1: svuint16_t, op2: u8) -> svuint16_t { + svsubwt_u16(op1, svdup_n_u8(op2)) +} +#[doc = "Subtract wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usubwt))] +pub fn svsubwt_u32(op1: svuint32_t, op2: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usubwt.nxv4i32")] + fn _svsubwt_u32(op1: svint32_t, op2: svint16_t) -> svint32_t; + } + unsafe { _svsubwt_u32(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usubwt))] +pub fn svsubwt_n_u32(op1: svuint32_t, op2: u16) -> svuint32_t { + svsubwt_u32(op1, svdup_n_u16(op2)) +} +#[doc = "Subtract wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usubwt))] +pub fn svsubwt_u64(op1: svuint64_t, op2: svuint32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usubwt.nxv2i64")] + fn _svsubwt_u64(op1: svint64_t, op2: svint32_t) -> svint64_t; + } + unsafe { _svsubwt_u64(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Subtract wide (top)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(usubwt))] +pub fn svsubwt_n_u64(op1: svuint64_t, op2: u32) -> svuint64_t { + svsubwt_u64(op1, svdup_n_u32(op2)) +} +#[doc = "Table lookup in two-vector table"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl2_f32(data: svfloat32x2_t, indices: svuint32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl2.nxv4f32")] + fn _svtbl2_f32(data0: svfloat32_t, data1: svfloat32_t, indices: svint32_t) -> svfloat32_t; + } + unsafe { + _svtbl2_f32( + svget2_f32::<0>(data), + svget2_f32::<1>(data), + indices.as_signed(), + ) + } +} +#[doc = "Table lookup in two-vector table"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl2_f64(data: svfloat64x2_t, indices: svuint64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl2.nxv2f64")] + fn _svtbl2_f64(data0: svfloat64_t, data1: svfloat64_t, indices: svint64_t) -> svfloat64_t; + } + unsafe { + _svtbl2_f64( + svget2_f64::<0>(data), + svget2_f64::<1>(data), + indices.as_signed(), + ) + } +} +#[doc = "Table lookup in two-vector table"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl2_s8(data: svint8x2_t, indices: svuint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl2.nxv16i8")] + fn _svtbl2_s8(data0: svint8_t, data1: svint8_t, indices: svint8_t) -> svint8_t; + } + unsafe { + _svtbl2_s8( + svget2_s8::<0>(data), + svget2_s8::<1>(data), + indices.as_signed(), + ) + } +} +#[doc = "Table lookup in two-vector table"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl2_s16(data: svint16x2_t, indices: svuint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl2.nxv8i16")] + fn _svtbl2_s16(data0: svint16_t, data1: svint16_t, indices: svint16_t) -> svint16_t; + } + unsafe { + _svtbl2_s16( + svget2_s16::<0>(data), + svget2_s16::<1>(data), + indices.as_signed(), + ) + } +} +#[doc = "Table lookup in two-vector table"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl2_s32(data: svint32x2_t, indices: svuint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl2.nxv4i32")] + fn _svtbl2_s32(data0: svint32_t, data1: svint32_t, indices: svint32_t) -> svint32_t; + } + unsafe { + _svtbl2_s32( + svget2_s32::<0>(data), + svget2_s32::<1>(data), + indices.as_signed(), + ) + } +} +#[doc = "Table lookup in two-vector table"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl2_s64(data: svint64x2_t, indices: svuint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl2.nxv2i64")] + fn _svtbl2_s64(data0: svint64_t, data1: svint64_t, indices: svint64_t) -> svint64_t; + } + unsafe { + _svtbl2_s64( + svget2_s64::<0>(data), + svget2_s64::<1>(data), + indices.as_signed(), + ) + } +} +#[doc = "Table lookup in two-vector table"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl2_u8(data: svuint8x2_t, indices: svuint8_t) -> svuint8_t { + unsafe { svtbl2_s8(data.as_signed(), indices).as_unsigned() } +} +#[doc = "Table lookup in two-vector table"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl2_u16(data: svuint16x2_t, indices: svuint16_t) -> svuint16_t { + unsafe { svtbl2_s16(data.as_signed(), indices).as_unsigned() } +} +#[doc = "Table lookup in two-vector table"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl2_u32(data: svuint32x2_t, indices: svuint32_t) -> svuint32_t { + unsafe { svtbl2_s32(data.as_signed(), indices).as_unsigned() } +} +#[doc = "Table lookup in two-vector table"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbl))] +pub fn svtbl2_u64(data: svuint64x2_t, indices: svuint64_t) -> svuint64_t { + unsafe { svtbl2_s64(data.as_signed(), indices).as_unsigned() } +} +#[doc = "Table lookup in single-vector table (merging)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_f32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbx))] +pub fn svtbx_f32(fallback: svfloat32_t, data: svfloat32_t, indices: svuint32_t) -> svfloat32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbx.nxv4f32")] + fn _svtbx_f32(fallback: svfloat32_t, data: svfloat32_t, indices: svint32_t) -> svfloat32_t; + } + unsafe { _svtbx_f32(fallback, data, indices.as_signed()) } +} +#[doc = "Table lookup in single-vector table (merging)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_f64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbx))] +pub fn svtbx_f64(fallback: svfloat64_t, data: svfloat64_t, indices: svuint64_t) -> svfloat64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbx.nxv2f64")] + fn _svtbx_f64(fallback: svfloat64_t, data: svfloat64_t, indices: svint64_t) -> svfloat64_t; + } + unsafe { _svtbx_f64(fallback, data, indices.as_signed()) } +} +#[doc = "Table lookup in single-vector table (merging)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbx))] +pub fn svtbx_s8(fallback: svint8_t, data: svint8_t, indices: svuint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbx.nxv16i8")] + fn _svtbx_s8(fallback: svint8_t, data: svint8_t, indices: svint8_t) -> svint8_t; + } + unsafe { _svtbx_s8(fallback, data, indices.as_signed()) } +} +#[doc = "Table lookup in single-vector table (merging)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbx))] +pub fn svtbx_s16(fallback: svint16_t, data: svint16_t, indices: svuint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbx.nxv8i16")] + fn _svtbx_s16(fallback: svint16_t, data: svint16_t, indices: svint16_t) -> svint16_t; + } + unsafe { _svtbx_s16(fallback, data, indices.as_signed()) } +} +#[doc = "Table lookup in single-vector table (merging)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbx))] +pub fn svtbx_s32(fallback: svint32_t, data: svint32_t, indices: svuint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbx.nxv4i32")] + fn _svtbx_s32(fallback: svint32_t, data: svint32_t, indices: svint32_t) -> svint32_t; + } + unsafe { _svtbx_s32(fallback, data, indices.as_signed()) } +} +#[doc = "Table lookup in single-vector table (merging)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbx))] +pub fn svtbx_s64(fallback: svint64_t, data: svint64_t, indices: svuint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbx.nxv2i64")] + fn _svtbx_s64(fallback: svint64_t, data: svint64_t, indices: svint64_t) -> svint64_t; + } + unsafe { _svtbx_s64(fallback, data, indices.as_signed()) } +} +#[doc = "Table lookup in single-vector table (merging)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbx))] +pub fn svtbx_u8(fallback: svuint8_t, data: svuint8_t, indices: svuint8_t) -> svuint8_t { + unsafe { svtbx_s8(fallback.as_signed(), data.as_signed(), indices).as_unsigned() } +} +#[doc = "Table lookup in single-vector table (merging)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbx))] +pub fn svtbx_u16(fallback: svuint16_t, data: svuint16_t, indices: svuint16_t) -> svuint16_t { + unsafe { svtbx_s16(fallback.as_signed(), data.as_signed(), indices).as_unsigned() } +} +#[doc = "Table lookup in single-vector table (merging)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbx))] +pub fn svtbx_u32(fallback: svuint32_t, data: svuint32_t, indices: svuint32_t) -> svuint32_t { + unsafe { svtbx_s32(fallback.as_signed(), data.as_signed(), indices).as_unsigned() } +} +#[doc = "Table lookup in single-vector table (merging)"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(tbx))] +pub fn svtbx_u64(fallback: svuint64_t, data: svuint64_t, indices: svuint64_t) -> svuint64_t { + unsafe { svtbx_s64(fallback.as_signed(), data.as_signed(), indices).as_unsigned() } +} +#[doc = "Unpack and extend high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_b])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(punpkhi))] +pub fn svunpkhi_b(op: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.punpkhi.nxv16i1" + )] + fn _svunpkhi_b(op: svbool_t) -> svbool8_t; + } + unsafe { _svunpkhi_b(op).sve_into() } +} +#[doc = "Unpack and extend high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sunpkhi))] +pub fn svunpkhi_s16(op: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sunpkhi.nxv8i16" + )] + fn _svunpkhi_s16(op: svint8_t) -> svint16_t; + } + unsafe { _svunpkhi_s16(op) } +} +#[doc = "Unpack and extend high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sunpkhi))] +pub fn svunpkhi_s32(op: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sunpkhi.nxv4i32" + )] + fn _svunpkhi_s32(op: svint16_t) -> svint32_t; + } + unsafe { _svunpkhi_s32(op) } +} +#[doc = "Unpack and extend high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sunpkhi))] +pub fn svunpkhi_s64(op: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sunpkhi.nxv2i64" + )] + fn _svunpkhi_s64(op: svint32_t) -> svint64_t; + } + unsafe { _svunpkhi_s64(op) } +} +#[doc = "Unpack and extend high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uunpkhi))] +pub fn svunpkhi_u16(op: svuint8_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uunpkhi.nxv8i16" + )] + fn _svunpkhi_u16(op: svint8_t) -> svint16_t; + } + unsafe { _svunpkhi_u16(op.as_signed()).as_unsigned() } +} +#[doc = "Unpack and extend high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uunpkhi))] +pub fn svunpkhi_u32(op: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uunpkhi.nxv4i32" + )] + fn _svunpkhi_u32(op: svint16_t) -> svint32_t; + } + unsafe { _svunpkhi_u32(op.as_signed()).as_unsigned() } +} +#[doc = "Unpack and extend high half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uunpkhi))] +pub fn svunpkhi_u64(op: svuint32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uunpkhi.nxv2i64" + )] + fn _svunpkhi_u64(op: svint32_t) -> svint64_t; + } + unsafe { _svunpkhi_u64(op.as_signed()).as_unsigned() } +} +#[doc = "Unpack and extend low half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_b])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(punpklo))] +pub fn svunpklo_b(op: svbool_t) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.punpklo.nxv16i1" + )] + fn _svunpklo_b(op: svbool_t) -> svbool8_t; + } + unsafe { _svunpklo_b(op).sve_into() } +} +#[doc = "Unpack and extend low half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sunpklo))] +pub fn svunpklo_s16(op: svint8_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sunpklo.nxv8i16" + )] + fn _svunpklo_s16(op: svint8_t) -> svint16_t; + } + unsafe { _svunpklo_s16(op) } +} +#[doc = "Unpack and extend low half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sunpklo))] +pub fn svunpklo_s32(op: svint16_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sunpklo.nxv4i32" + )] + fn _svunpklo_s32(op: svint16_t) -> svint32_t; + } + unsafe { _svunpklo_s32(op) } +} +#[doc = "Unpack and extend low half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(sunpklo))] +pub fn svunpklo_s64(op: svint32_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.sunpklo.nxv2i64" + )] + fn _svunpklo_s64(op: svint32_t) -> svint64_t; + } + unsafe { _svunpklo_s64(op) } +} +#[doc = "Unpack and extend low half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uunpklo))] +pub fn svunpklo_u16(op: svuint8_t) -> svuint16_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uunpklo.nxv8i16" + )] + fn _svunpklo_u16(op: svint8_t) -> svint16_t; + } + unsafe { _svunpklo_u16(op.as_signed()).as_unsigned() } +} +#[doc = "Unpack and extend low half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uunpklo))] +pub fn svunpklo_u32(op: svuint16_t) -> svuint32_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uunpklo.nxv4i32" + )] + fn _svunpklo_u32(op: svint16_t) -> svint32_t; + } + unsafe { _svunpklo_u32(op.as_signed()).as_unsigned() } +} +#[doc = "Unpack and extend low half"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(uunpklo))] +pub fn svunpklo_u64(op: svuint32_t) -> svuint64_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.uunpklo.nxv2i64" + )] + fn _svunpklo_u64(op: svint32_t) -> svint64_t; + } + unsafe { _svunpklo_u64(op.as_signed()).as_unsigned() } +} +#[doc = "Saturating add with unsigned addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.suqadd.nxv16i8")] + fn _svuqadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t; + } + unsafe { _svuqadd_s8_m(pg, op1, op2.as_signed()) } +} +#[doc = "Saturating add with unsigned addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s8]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_n_s8_m(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t { + svuqadd_s8_m(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_s8_x(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t { + svuqadd_s8_m(pg, op1, op2) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s8]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_n_s8_x(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t { + svuqadd_s8_x(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_s8_z(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t { + svuqadd_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s8]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_n_s8_z(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t { + svuqadd_s8_z(pg, op1, svdup_n_u8(op2)) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_s16_m(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.suqadd.nxv8i16")] + fn _svuqadd_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t; + } + unsafe { _svuqadd_s16_m(pg.sve_into(), op1, op2.as_signed()) } +} +#[doc = "Saturating add with unsigned addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s16]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_n_s16_m(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t { + svuqadd_s16_m(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_s16_x(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t { + svuqadd_s16_m(pg, op1, op2) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s16]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_n_s16_x(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t { + svuqadd_s16_x(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_s16_z(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t { + svuqadd_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s16]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_n_s16_z(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t { + svuqadd_s16_z(pg, op1, svdup_n_u16(op2)) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_s32_m(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.suqadd.nxv4i32")] + fn _svuqadd_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t; + } + unsafe { _svuqadd_s32_m(pg.sve_into(), op1, op2.as_signed()) } +} +#[doc = "Saturating add with unsigned addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s32]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_n_s32_m(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t { + svuqadd_s32_m(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_s32_x(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t { + svuqadd_s32_m(pg, op1, op2) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s32]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_n_s32_x(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t { + svuqadd_s32_x(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_s32_z(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t { + svuqadd_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s32]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_n_s32_z(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t { + svuqadd_s32_z(pg, op1, svdup_n_u32(op2)) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_s64_m(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t { + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.suqadd.nxv2i64")] + fn _svuqadd_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t; + } + unsafe { _svuqadd_s64_m(pg.sve_into(), op1, op2.as_signed()) } +} +#[doc = "Saturating add with unsigned addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s64]_m)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_n_s64_m(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t { + svuqadd_s64_m(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_s64_x(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t { + svuqadd_s64_m(pg, op1, op2) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s64]_x)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_n_s64_x(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t { + svuqadd_s64_x(pg, op1, svdup_n_u64(op2)) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_s64_z(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t { + svuqadd_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2) +} +#[doc = "Saturating add with unsigned addend"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s64]_z)"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(suqadd))] +pub fn svuqadd_n_s64_z(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t { + svuqadd_s64_z(pg, op1, svdup_n_u64(op2)) +} +#[doc = "While decrementing scalar is greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b8[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilege))] +pub fn svwhilege_b8_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilege.nxv16i1.i32" + )] + fn _svwhilege_b8_s32(op1: i32, op2: i32) -> svbool_t; + } + unsafe { _svwhilege_b8_s32(op1, op2) } +} +#[doc = "While decrementing scalar is greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b16[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilege))] +pub fn svwhilege_b16_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilege.nxv8i1.i32" + )] + fn _svwhilege_b16_s32(op1: i32, op2: i32) -> svbool8_t; + } + unsafe { _svwhilege_b16_s32(op1, op2).sve_into() } +} +#[doc = "While decrementing scalar is greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b32[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilege))] +pub fn svwhilege_b32_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilege.nxv4i1.i32" + )] + fn _svwhilege_b32_s32(op1: i32, op2: i32) -> svbool4_t; + } + unsafe { _svwhilege_b32_s32(op1, op2).sve_into() } +} +#[doc = "While decrementing scalar is greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b64[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilege))] +pub fn svwhilege_b64_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilege.nxv2i1.i32" + )] + fn _svwhilege_b64_s32(op1: i32, op2: i32) -> svbool2_t; + } + unsafe { _svwhilege_b64_s32(op1, op2).sve_into() } +} +#[doc = "While decrementing scalar is greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b8[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilege))] +pub fn svwhilege_b8_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilege.nxv16i1.i64" + )] + fn _svwhilege_b8_s64(op1: i64, op2: i64) -> svbool_t; + } + unsafe { _svwhilege_b8_s64(op1, op2) } +} +#[doc = "While decrementing scalar is greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b16[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilege))] +pub fn svwhilege_b16_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilege.nxv8i1.i64" + )] + fn _svwhilege_b16_s64(op1: i64, op2: i64) -> svbool8_t; + } + unsafe { _svwhilege_b16_s64(op1, op2).sve_into() } +} +#[doc = "While decrementing scalar is greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b32[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilege))] +pub fn svwhilege_b32_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilege.nxv4i1.i64" + )] + fn _svwhilege_b32_s64(op1: i64, op2: i64) -> svbool4_t; + } + unsafe { _svwhilege_b32_s64(op1, op2).sve_into() } +} +#[doc = "While decrementing scalar is greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b64[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilege))] +pub fn svwhilege_b64_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilege.nxv2i1.i64" + )] + fn _svwhilege_b64_s64(op1: i64, op2: i64) -> svbool2_t; + } + unsafe { _svwhilege_b64_s64(op1, op2).sve_into() } +} +#[doc = "While decrementing scalar is greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b8[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilehs))] +pub fn svwhilege_b8_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilehs.nxv16i1.i32" + )] + fn _svwhilege_b8_u32(op1: i32, op2: i32) -> svbool_t; + } + unsafe { _svwhilege_b8_u32(op1.as_signed(), op2.as_signed()) } +} +#[doc = "While decrementing scalar is greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b16[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilehs))] +pub fn svwhilege_b16_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilehs.nxv8i1.i32" + )] + fn _svwhilege_b16_u32(op1: i32, op2: i32) -> svbool8_t; + } + unsafe { _svwhilege_b16_u32(op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "While decrementing scalar is greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b32[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilehs))] +pub fn svwhilege_b32_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilehs.nxv4i1.i32" + )] + fn _svwhilege_b32_u32(op1: i32, op2: i32) -> svbool4_t; + } + unsafe { _svwhilege_b32_u32(op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "While decrementing scalar is greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b64[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilehs))] +pub fn svwhilege_b64_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilehs.nxv2i1.i32" + )] + fn _svwhilege_b64_u32(op1: i32, op2: i32) -> svbool2_t; + } + unsafe { _svwhilege_b64_u32(op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "While decrementing scalar is greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b8[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilehs))] +pub fn svwhilege_b8_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilehs.nxv16i1.i64" + )] + fn _svwhilege_b8_u64(op1: i64, op2: i64) -> svbool_t; + } + unsafe { _svwhilege_b8_u64(op1.as_signed(), op2.as_signed()) } +} +#[doc = "While decrementing scalar is greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b16[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilehs))] +pub fn svwhilege_b16_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilehs.nxv8i1.i64" + )] + fn _svwhilege_b16_u64(op1: i64, op2: i64) -> svbool8_t; + } + unsafe { _svwhilege_b16_u64(op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "While decrementing scalar is greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b32[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilehs))] +pub fn svwhilege_b32_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilehs.nxv4i1.i64" + )] + fn _svwhilege_b32_u64(op1: i64, op2: i64) -> svbool4_t; + } + unsafe { _svwhilege_b32_u64(op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "While decrementing scalar is greater than or equal to"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b64[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilehs))] +pub fn svwhilege_b64_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilehs.nxv2i1.i64" + )] + fn _svwhilege_b64_u64(op1: i64, op2: i64) -> svbool2_t; + } + unsafe { _svwhilege_b64_u64(op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "While decrementing scalar is greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b8[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilegt))] +pub fn svwhilegt_b8_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilegt.nxv16i1.i32" + )] + fn _svwhilegt_b8_s32(op1: i32, op2: i32) -> svbool_t; + } + unsafe { _svwhilegt_b8_s32(op1, op2) } +} +#[doc = "While decrementing scalar is greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b16[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilegt))] +pub fn svwhilegt_b16_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilegt.nxv8i1.i32" + )] + fn _svwhilegt_b16_s32(op1: i32, op2: i32) -> svbool8_t; + } + unsafe { _svwhilegt_b16_s32(op1, op2).sve_into() } +} +#[doc = "While decrementing scalar is greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b32[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilegt))] +pub fn svwhilegt_b32_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilegt.nxv4i1.i32" + )] + fn _svwhilegt_b32_s32(op1: i32, op2: i32) -> svbool4_t; + } + unsafe { _svwhilegt_b32_s32(op1, op2).sve_into() } +} +#[doc = "While decrementing scalar is greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b64[_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilegt))] +pub fn svwhilegt_b64_s32(op1: i32, op2: i32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilegt.nxv2i1.i32" + )] + fn _svwhilegt_b64_s32(op1: i32, op2: i32) -> svbool2_t; + } + unsafe { _svwhilegt_b64_s32(op1, op2).sve_into() } +} +#[doc = "While decrementing scalar is greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b8[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilegt))] +pub fn svwhilegt_b8_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilegt.nxv16i1.i64" + )] + fn _svwhilegt_b8_s64(op1: i64, op2: i64) -> svbool_t; + } + unsafe { _svwhilegt_b8_s64(op1, op2) } +} +#[doc = "While decrementing scalar is greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b16[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilegt))] +pub fn svwhilegt_b16_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilegt.nxv8i1.i64" + )] + fn _svwhilegt_b16_s64(op1: i64, op2: i64) -> svbool8_t; + } + unsafe { _svwhilegt_b16_s64(op1, op2).sve_into() } +} +#[doc = "While decrementing scalar is greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b32[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilegt))] +pub fn svwhilegt_b32_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilegt.nxv4i1.i64" + )] + fn _svwhilegt_b32_s64(op1: i64, op2: i64) -> svbool4_t; + } + unsafe { _svwhilegt_b32_s64(op1, op2).sve_into() } +} +#[doc = "While decrementing scalar is greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b64[_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilegt))] +pub fn svwhilegt_b64_s64(op1: i64, op2: i64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilegt.nxv2i1.i64" + )] + fn _svwhilegt_b64_s64(op1: i64, op2: i64) -> svbool2_t; + } + unsafe { _svwhilegt_b64_s64(op1, op2).sve_into() } +} +#[doc = "While decrementing scalar is greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b8[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilehi))] +pub fn svwhilegt_b8_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilehi.nxv16i1.i32" + )] + fn _svwhilegt_b8_u32(op1: i32, op2: i32) -> svbool_t; + } + unsafe { _svwhilegt_b8_u32(op1.as_signed(), op2.as_signed()) } +} +#[doc = "While decrementing scalar is greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b16[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilehi))] +pub fn svwhilegt_b16_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilehi.nxv8i1.i32" + )] + fn _svwhilegt_b16_u32(op1: i32, op2: i32) -> svbool8_t; + } + unsafe { _svwhilegt_b16_u32(op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "While decrementing scalar is greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b32[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilehi))] +pub fn svwhilegt_b32_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilehi.nxv4i1.i32" + )] + fn _svwhilegt_b32_u32(op1: i32, op2: i32) -> svbool4_t; + } + unsafe { _svwhilegt_b32_u32(op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "While decrementing scalar is greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b64[_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilehi))] +pub fn svwhilegt_b64_u32(op1: u32, op2: u32) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilehi.nxv2i1.i32" + )] + fn _svwhilegt_b64_u32(op1: i32, op2: i32) -> svbool2_t; + } + unsafe { _svwhilegt_b64_u32(op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "While decrementing scalar is greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b8[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilehi))] +pub fn svwhilegt_b8_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilehi.nxv16i1.i64" + )] + fn _svwhilegt_b8_u64(op1: i64, op2: i64) -> svbool_t; + } + unsafe { _svwhilegt_b8_u64(op1.as_signed(), op2.as_signed()) } +} +#[doc = "While decrementing scalar is greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b16[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilehi))] +pub fn svwhilegt_b16_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilehi.nxv8i1.i64" + )] + fn _svwhilegt_b16_u64(op1: i64, op2: i64) -> svbool8_t; + } + unsafe { _svwhilegt_b16_u64(op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "While decrementing scalar is greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b32[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilehi))] +pub fn svwhilegt_b32_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilehi.nxv4i1.i64" + )] + fn _svwhilegt_b32_u64(op1: i64, op2: i64) -> svbool4_t; + } + unsafe { _svwhilegt_b32_u64(op1.as_signed(), op2.as_signed()).sve_into() } +} +#[doc = "While decrementing scalar is greater than"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b64[_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilehi))] +pub fn svwhilegt_b64_u64(op1: u64, op2: u64) -> svbool_t { + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilehi.nxv2i1.i64" + )] + fn _svwhilegt_b64_u64(op1: i64, op2: i64) -> svbool2_t; + } + unsafe { _svwhilegt_b64_u64(op1.as_signed(), op2.as_signed()).sve_into() } +} +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +unsafe fn svwhilerw_8ptr(op1: *const T, op2: *const T) -> svbool_t { + let op1 = op1 as *const crate::ffi::c_void; + let op2 = op2 as *const crate::ffi::c_void; + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilerw.b.nxv16i1.p0" + )] + fn _svwhilerw_8ptr( + op1: *const crate::ffi::c_void, + op2: *const crate::ffi::c_void, + ) -> svbool_t; + } + _svwhilerw_8ptr(op1, op2) +} +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +unsafe fn svwhilerw_16ptr(op1: *const T, op2: *const T) -> svbool_t { + let op1 = op1 as *const crate::ffi::c_void; + let op2 = op2 as *const crate::ffi::c_void; + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilerw.h.nxv8i1.p0" + )] + fn _svwhilerw_16ptr( + op1: *const crate::ffi::c_void, + op2: *const crate::ffi::c_void, + ) -> svbool8_t; + } + _svwhilerw_16ptr(op1, op2).sve_into() +} +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +unsafe fn svwhilerw_32ptr(op1: *const T, op2: *const T) -> svbool_t { + let op1 = op1 as *const crate::ffi::c_void; + let op2 = op2 as *const crate::ffi::c_void; + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilerw.s.nxv4i1.p0" + )] + fn _svwhilerw_32ptr( + op1: *const crate::ffi::c_void, + op2: *const crate::ffi::c_void, + ) -> svbool4_t; + } + _svwhilerw_32ptr(op1, op2).sve_into() +} +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +unsafe fn svwhilerw_64ptr(op1: *const T, op2: *const T) -> svbool_t { + let op1 = op1 as *const crate::ffi::c_void; + let op2 = op2 as *const crate::ffi::c_void; + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilerw.d.nxv2i1.p0" + )] + fn _svwhilerw_64ptr( + op1: *const crate::ffi::c_void, + op2: *const crate::ffi::c_void, + ) -> svbool2_t; + } + _svwhilerw_64ptr(op1, op2).sve_into() +} +#[doc = "While free of read-after-write conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilerw))] +pub unsafe fn svwhilerw_f32(op1: *const f32, op2: *const f32) -> svbool_t { + svwhilerw_32ptr::(op1, op2) +} +#[doc = "While free of read-after-write conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilerw))] +pub unsafe fn svwhilerw_f64(op1: *const f64, op2: *const f64) -> svbool_t { + svwhilerw_64ptr::(op1, op2) +} +#[doc = "While free of read-after-write conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilerw))] +pub unsafe fn svwhilerw_s8(op1: *const i8, op2: *const i8) -> svbool_t { + svwhilerw_8ptr::(op1, op2) +} +#[doc = "While free of read-after-write conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilerw))] +pub unsafe fn svwhilerw_s16(op1: *const i16, op2: *const i16) -> svbool_t { + svwhilerw_16ptr::(op1, op2) +} +#[doc = "While free of read-after-write conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilerw))] +pub unsafe fn svwhilerw_s32(op1: *const i32, op2: *const i32) -> svbool_t { + svwhilerw_32ptr::(op1, op2) +} +#[doc = "While free of read-after-write conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilerw))] +pub unsafe fn svwhilerw_s64(op1: *const i64, op2: *const i64) -> svbool_t { + svwhilerw_64ptr::(op1, op2) +} +#[doc = "While free of read-after-write conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilerw))] +pub unsafe fn svwhilerw_u8(op1: *const u8, op2: *const u8) -> svbool_t { + svwhilerw_8ptr::(op1, op2) +} +#[doc = "While free of read-after-write conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilerw))] +pub unsafe fn svwhilerw_u16(op1: *const u16, op2: *const u16) -> svbool_t { + svwhilerw_16ptr::(op1, op2) +} +#[doc = "While free of read-after-write conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilerw))] +pub unsafe fn svwhilerw_u32(op1: *const u32, op2: *const u32) -> svbool_t { + svwhilerw_32ptr::(op1, op2) +} +#[doc = "While free of read-after-write conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilerw))] +pub unsafe fn svwhilerw_u64(op1: *const u64, op2: *const u64) -> svbool_t { + svwhilerw_64ptr::(op1, op2) +} +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +unsafe fn svwhilewr_8ptr(op1: *const T, op2: *const T) -> svbool_t { + let op1 = op1 as *const crate::ffi::c_void; + let op2 = op2 as *const crate::ffi::c_void; + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilewr.b.nxv16i1.p0" + )] + fn _svwhilewr_8ptr( + op1: *const crate::ffi::c_void, + op2: *const crate::ffi::c_void, + ) -> svbool_t; + } + _svwhilewr_8ptr(op1, op2) +} +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +unsafe fn svwhilewr_16ptr(op1: *const T, op2: *const T) -> svbool_t { + let op1 = op1 as *const crate::ffi::c_void; + let op2 = op2 as *const crate::ffi::c_void; + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilewr.h.nxv8i1.p0" + )] + fn _svwhilewr_16ptr( + op1: *const crate::ffi::c_void, + op2: *const crate::ffi::c_void, + ) -> svbool8_t; + } + _svwhilewr_16ptr(op1, op2).sve_into() +} +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +unsafe fn svwhilewr_32ptr(op1: *const T, op2: *const T) -> svbool_t { + let op1 = op1 as *const crate::ffi::c_void; + let op2 = op2 as *const crate::ffi::c_void; + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilewr.s.nxv4i1.p0" + )] + fn _svwhilewr_32ptr( + op1: *const crate::ffi::c_void, + op2: *const crate::ffi::c_void, + ) -> svbool4_t; + } + _svwhilewr_32ptr(op1, op2).sve_into() +} +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +unsafe fn svwhilewr_64ptr(op1: *const T, op2: *const T) -> svbool_t { + let op1 = op1 as *const crate::ffi::c_void; + let op2 = op2 as *const crate::ffi::c_void; + unsafe extern "unadjusted" { + #[cfg_attr( + target_arch = "aarch64", + link_name = "llvm.aarch64.sve.whilewr.d.nxv2i1.p0" + )] + fn _svwhilewr_64ptr( + op1: *const crate::ffi::c_void, + op2: *const crate::ffi::c_void, + ) -> svbool2_t; + } + _svwhilewr_64ptr(op1, op2).sve_into() +} +#[doc = "While free of write-after-read conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_f32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilewr))] +pub unsafe fn svwhilewr_f32(op1: *const f32, op2: *const f32) -> svbool_t { + svwhilewr_32ptr::(op1, op2) +} +#[doc = "While free of write-after-read conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_f64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilewr))] +pub unsafe fn svwhilewr_f64(op1: *const f64, op2: *const f64) -> svbool_t { + svwhilewr_64ptr::(op1, op2) +} +#[doc = "While free of write-after-read conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_s8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilewr))] +pub unsafe fn svwhilewr_s8(op1: *const i8, op2: *const i8) -> svbool_t { + svwhilewr_8ptr::(op1, op2) +} +#[doc = "While free of write-after-read conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_s16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilewr))] +pub unsafe fn svwhilewr_s16(op1: *const i16, op2: *const i16) -> svbool_t { + svwhilewr_16ptr::(op1, op2) +} +#[doc = "While free of write-after-read conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_s32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilewr))] +pub unsafe fn svwhilewr_s32(op1: *const i32, op2: *const i32) -> svbool_t { + svwhilewr_32ptr::(op1, op2) +} +#[doc = "While free of write-after-read conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_s64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilewr))] +pub unsafe fn svwhilewr_s64(op1: *const i64, op2: *const i64) -> svbool_t { + svwhilewr_64ptr::(op1, op2) +} +#[doc = "While free of write-after-read conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_u8])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilewr))] +pub unsafe fn svwhilewr_u8(op1: *const u8, op2: *const u8) -> svbool_t { + svwhilewr_8ptr::(op1, op2) +} +#[doc = "While free of write-after-read conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_u16])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilewr))] +pub unsafe fn svwhilewr_u16(op1: *const u16, op2: *const u16) -> svbool_t { + svwhilewr_16ptr::(op1, op2) +} +#[doc = "While free of write-after-read conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_u32])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilewr))] +pub unsafe fn svwhilewr_u32(op1: *const u32, op2: *const u32) -> svbool_t { + svwhilewr_32ptr::(op1, op2) +} +#[doc = "While free of write-after-read conflicts"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_u64])"] +#[doc = "## Safety"] +#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(whilewr))] +pub unsafe fn svwhilewr_u64(op1: *const u64, op2: *const u64) -> svbool_t { + svwhilewr_64ptr::(op1, op2) +} +#[doc = "Bitwise exclusive OR and rotate right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_s8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(xar, IMM3 = 1))] +pub fn svxar_n_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { + static_assert_range!(IMM3, 1..=8); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.xar.nxv16i8")] + fn _svxar_n_s8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t; + } + unsafe { _svxar_n_s8(op1, op2, IMM3) } +} +#[doc = "Bitwise exclusive OR and rotate right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_s16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(xar, IMM3 = 1))] +pub fn svxar_n_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { + static_assert_range!(IMM3, 1..=16); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.xar.nxv8i16")] + fn _svxar_n_s16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t; + } + unsafe { _svxar_n_s16(op1, op2, IMM3) } +} +#[doc = "Bitwise exclusive OR and rotate right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_s32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(xar, IMM3 = 1))] +pub fn svxar_n_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { + static_assert_range!(IMM3, 1..=32); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.xar.nxv4i32")] + fn _svxar_n_s32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t; + } + unsafe { _svxar_n_s32(op1, op2, IMM3) } +} +#[doc = "Bitwise exclusive OR and rotate right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_s64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(xar, IMM3 = 1))] +pub fn svxar_n_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { + static_assert_range!(IMM3, 1..=64); + unsafe extern "unadjusted" { + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.xar.nxv2i64")] + fn _svxar_n_s64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t; + } + unsafe { _svxar_n_s64(op1, op2, IMM3) } +} +#[doc = "Bitwise exclusive OR and rotate right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_u8])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(xar, IMM3 = 1))] +pub fn svxar_n_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { + static_assert_range!(IMM3, 1..=8); + unsafe { svxar_n_s8::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR and rotate right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_u16])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(xar, IMM3 = 1))] +pub fn svxar_n_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { + static_assert_range!(IMM3, 1..=16); + unsafe { svxar_n_s16::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR and rotate right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_u32])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(xar, IMM3 = 1))] +pub fn svxar_n_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { + static_assert_range!(IMM3, 1..=32); + unsafe { svxar_n_s32::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} +#[doc = "Bitwise exclusive OR and rotate right"] +#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_u64])"] +#[inline(always)] +#[target_feature(enable = "sve,sve2")] +#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] +#[cfg_attr(test, assert_instr(xar, IMM3 = 1))] +pub fn svxar_n_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { + static_assert_range!(IMM3, 1..=64); + unsafe { svxar_n_s64::(op1.as_signed(), op2.as_signed()).as_unsigned() } +} diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve2/ld_st_tests_aarch64.rs b/library/stdarch/crates/core_arch/src/aarch64/sve2/ld_st_tests_aarch64.rs new file mode 100644 index 000000000000..2ec3ad6a5d04 --- /dev/null +++ b/library/stdarch/crates/core_arch/src/aarch64/sve2/ld_st_tests_aarch64.rs @@ -0,0 +1,2482 @@ +// This code is automatically generated. DO NOT MODIFY. +// +// Instead, modify `crates/stdarch-gen-arm/spec/sve` and run the following command to re-generate +// this file: +// +// ``` +// cargo run --bin=stdarch-gen-arm -- crates/stdarch-gen-arm/spec +// ``` +#![allow(unused)] +use super::*; +use std::boxed::Box; +use std::convert::{TryFrom, TryInto}; +use std::sync::LazyLock; +use std::vec::Vec; +use stdarch_test::simd_test; +static F32_DATA: LazyLock<[f32; 64 * 5]> = LazyLock::new(|| { + (0..64 * 5) + .map(|i| i as f32) + .collect::>() + .try_into() + .expect("f32 data incorrectly initialised") +}); +static F64_DATA: LazyLock<[f64; 32 * 5]> = LazyLock::new(|| { + (0..32 * 5) + .map(|i| i as f64) + .collect::>() + .try_into() + .expect("f64 data incorrectly initialised") +}); +static I8_DATA: LazyLock<[i8; 256 * 5]> = LazyLock::new(|| { + (0..256 * 5) + .map(|i| ((i + 128) % 256 - 128) as i8) + .collect::>() + .try_into() + .expect("i8 data incorrectly initialised") +}); +static I16_DATA: LazyLock<[i16; 128 * 5]> = LazyLock::new(|| { + (0..128 * 5) + .map(|i| i as i16) + .collect::>() + .try_into() + .expect("i16 data incorrectly initialised") +}); +static I32_DATA: LazyLock<[i32; 64 * 5]> = LazyLock::new(|| { + (0..64 * 5) + .map(|i| i as i32) + .collect::>() + .try_into() + .expect("i32 data incorrectly initialised") +}); +static I64_DATA: LazyLock<[i64; 32 * 5]> = LazyLock::new(|| { + (0..32 * 5) + .map(|i| i as i64) + .collect::>() + .try_into() + .expect("i64 data incorrectly initialised") +}); +static U8_DATA: LazyLock<[u8; 256 * 5]> = LazyLock::new(|| { + (0..256 * 5) + .map(|i| i as u8) + .collect::>() + .try_into() + .expect("u8 data incorrectly initialised") +}); +static U16_DATA: LazyLock<[u16; 128 * 5]> = LazyLock::new(|| { + (0..128 * 5) + .map(|i| i as u16) + .collect::>() + .try_into() + .expect("u16 data incorrectly initialised") +}); +static U32_DATA: LazyLock<[u32; 64 * 5]> = LazyLock::new(|| { + (0..64 * 5) + .map(|i| i as u32) + .collect::>() + .try_into() + .expect("u32 data incorrectly initialised") +}); +static U64_DATA: LazyLock<[u64; 32 * 5]> = LazyLock::new(|| { + (0..32 * 5) + .map(|i| i as u64) + .collect::>() + .try_into() + .expect("u64 data incorrectly initialised") +}); +#[target_feature(enable = "sve")] +fn assert_vector_matches_f32(vector: svfloat32_t, expected: svfloat32_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b32(), defined)); + let cmp = svcmpne_f32(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[target_feature(enable = "sve")] +fn assert_vector_matches_f64(vector: svfloat64_t, expected: svfloat64_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b64(), defined)); + let cmp = svcmpne_f64(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[target_feature(enable = "sve")] +fn assert_vector_matches_i8(vector: svint8_t, expected: svint8_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b8(), defined)); + let cmp = svcmpne_s8(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[target_feature(enable = "sve")] +fn assert_vector_matches_i16(vector: svint16_t, expected: svint16_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b16(), defined)); + let cmp = svcmpne_s16(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[target_feature(enable = "sve")] +fn assert_vector_matches_i32(vector: svint32_t, expected: svint32_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b32(), defined)); + let cmp = svcmpne_s32(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[target_feature(enable = "sve")] +fn assert_vector_matches_i64(vector: svint64_t, expected: svint64_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b64(), defined)); + let cmp = svcmpne_s64(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[target_feature(enable = "sve")] +fn assert_vector_matches_u8(vector: svuint8_t, expected: svuint8_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b8(), defined)); + let cmp = svcmpne_u8(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[target_feature(enable = "sve")] +fn assert_vector_matches_u16(vector: svuint16_t, expected: svuint16_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b16(), defined)); + let cmp = svcmpne_u16(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[target_feature(enable = "sve")] +fn assert_vector_matches_u32(vector: svuint32_t, expected: svuint32_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b32(), defined)); + let cmp = svcmpne_u32(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[target_feature(enable = "sve")] +fn assert_vector_matches_u64(vector: svuint64_t, expected: svuint64_t) { + let defined = svrdffr(); + assert!(svptest_first(svptrue_b64(), defined)); + let cmp = svcmpne_u64(defined, vector, expected); + assert!(!svptest_any(defined, cmp)) +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_s64index_f64_with_svstnt1_scatter_s64index_f64() { + let mut storage = [0 as f64; 160usize]; + let data = svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let indices = svindex_s64(0, 1); + svstnt1_scatter_s64index_f64(svptrue_b64(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = + svldnt1_gather_s64index_f64(svptrue_b64(), storage.as_ptr() as *const f64, indices); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_s64index_s64_with_svstnt1_scatter_s64index_s64() { + let mut storage = [0 as i64; 160usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s64(0, 1); + svstnt1_scatter_s64index_s64(svptrue_b64(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = + svldnt1_gather_s64index_s64(svptrue_b64(), storage.as_ptr() as *const i64, indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_s64index_u64_with_svstnt1_scatter_s64index_u64() { + let mut storage = [0 as u64; 160usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s64(0, 1); + svstnt1_scatter_s64index_u64(svptrue_b64(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = + svldnt1_gather_s64index_u64(svptrue_b64(), storage.as_ptr() as *const u64, indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u64index_f64_with_svstnt1_scatter_u64index_f64() { + let mut storage = [0 as f64; 160usize]; + let data = svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let indices = svindex_u64(0, 1); + svstnt1_scatter_u64index_f64(svptrue_b64(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = + svldnt1_gather_u64index_f64(svptrue_b64(), storage.as_ptr() as *const f64, indices); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u64index_s64_with_svstnt1_scatter_u64index_s64() { + let mut storage = [0 as i64; 160usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u64(0, 1); + svstnt1_scatter_u64index_s64(svptrue_b64(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = + svldnt1_gather_u64index_s64(svptrue_b64(), storage.as_ptr() as *const i64, indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u64index_u64_with_svstnt1_scatter_u64index_u64() { + let mut storage = [0 as u64; 160usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u64(0, 1); + svstnt1_scatter_u64index_u64(svptrue_b64(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = + svldnt1_gather_u64index_u64(svptrue_b64(), storage.as_ptr() as *const u64, indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_s64offset_f64_with_svstnt1_scatter_s64offset_f64() { + let mut storage = [0 as f64; 160usize]; + let data = svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let offsets = svindex_s64(0, 8u32.try_into().unwrap()); + svstnt1_scatter_s64offset_f64(svptrue_b64(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = + svldnt1_gather_s64offset_f64(svptrue_b64(), storage.as_ptr() as *const f64, offsets); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_s64offset_s64_with_svstnt1_scatter_s64offset_s64() { + let mut storage = [0 as i64; 160usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 8u32.try_into().unwrap()); + svstnt1_scatter_s64offset_s64(svptrue_b64(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = + svldnt1_gather_s64offset_s64(svptrue_b64(), storage.as_ptr() as *const i64, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_s64offset_u64_with_svstnt1_scatter_s64offset_u64() { + let mut storage = [0 as u64; 160usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 8u32.try_into().unwrap()); + svstnt1_scatter_s64offset_u64(svptrue_b64(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = + svldnt1_gather_s64offset_u64(svptrue_b64(), storage.as_ptr() as *const u64, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u32offset_f32_with_svstnt1_scatter_u32offset_f32() { + let mut storage = [0 as f32; 320usize]; + let data = svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let offsets = svindex_u32(0, 4u32.try_into().unwrap()); + svstnt1_scatter_u32offset_f32(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = + svldnt1_gather_u32offset_f32(svptrue_b32(), storage.as_ptr() as *const f32, offsets); + assert_vector_matches_f32( + loaded, + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u32offset_s32_with_svstnt1_scatter_u32offset_s32() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 4u32.try_into().unwrap()); + svstnt1_scatter_u32offset_s32(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = + svldnt1_gather_u32offset_s32(svptrue_b32(), storage.as_ptr() as *const i32, offsets); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u32offset_u32_with_svstnt1_scatter_u32offset_u32() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 4u32.try_into().unwrap()); + svstnt1_scatter_u32offset_u32(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = + svldnt1_gather_u32offset_u32(svptrue_b32(), storage.as_ptr() as *const u32, offsets); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u64offset_f64_with_svstnt1_scatter_u64offset_f64() { + let mut storage = [0 as f64; 160usize]; + let data = svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + svstnt1_scatter_u64offset_f64(svptrue_b64(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = + svldnt1_gather_u64offset_f64(svptrue_b64(), storage.as_ptr() as *const f64, offsets); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u64offset_s64_with_svstnt1_scatter_u64offset_s64() { + let mut storage = [0 as i64; 160usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + svstnt1_scatter_u64offset_s64(svptrue_b64(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = + svldnt1_gather_u64offset_s64(svptrue_b64(), storage.as_ptr() as *const i64, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u64offset_u64_with_svstnt1_scatter_u64offset_u64() { + let mut storage = [0 as u64; 160usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + svstnt1_scatter_u64offset_u64(svptrue_b64(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = + svldnt1_gather_u64offset_u64(svptrue_b64(), storage.as_ptr() as *const u64, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u64base_f64_with_svstnt1_scatter_u64base_f64() { + let mut storage = [0 as f64; 160usize]; + let data = svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svstnt1_scatter_u64base_f64(svptrue_b64(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svldnt1_gather_u64base_f64(svptrue_b64(), bases); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u64base_s64_with_svstnt1_scatter_u64base_s64() { + let mut storage = [0 as i64; 160usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svstnt1_scatter_u64base_s64(svptrue_b64(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svldnt1_gather_u64base_s64(svptrue_b64(), bases); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u64base_u64_with_svstnt1_scatter_u64base_u64() { + let mut storage = [0 as u64; 160usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svstnt1_scatter_u64base_u64(svptrue_b64(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svldnt1_gather_u64base_u64(svptrue_b64(), bases); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u32base_index_f32_with_svstnt1_scatter_u32base_index_f32() { + let mut storage = [0 as f32; 320usize]; + let data = svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let bases = svindex_u32(0, 4u32.try_into().unwrap()); + svstnt1_scatter_u32base_index_f32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 / (4u32 as i64) + 1, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = svldnt1_gather_u32base_index_f32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 / (4u32 as i64) + 1, + ); + assert_vector_matches_f32( + loaded, + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u32base_index_s32_with_svstnt1_scatter_u32base_index_s32() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 4u32.try_into().unwrap()); + svstnt1_scatter_u32base_index_s32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 / (4u32 as i64) + 1, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svldnt1_gather_u32base_index_s32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 / (4u32 as i64) + 1, + ); + assert_vector_matches_i32( + loaded, + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u32base_index_u32_with_svstnt1_scatter_u32base_index_u32() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 4u32.try_into().unwrap()); + svstnt1_scatter_u32base_index_u32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 / (4u32 as i64) + 1, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svldnt1_gather_u32base_index_u32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 / (4u32 as i64) + 1, + ); + assert_vector_matches_u32( + loaded, + svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u64base_index_f64_with_svstnt1_scatter_u64base_index_f64() { + let mut storage = [0 as f64; 160usize]; + let data = svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svstnt1_scatter_u64base_index_f64(svptrue_b64(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svldnt1_gather_u64base_index_f64(svptrue_b64(), bases, 1.try_into().unwrap()); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u64base_index_s64_with_svstnt1_scatter_u64base_index_s64() { + let mut storage = [0 as i64; 160usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svstnt1_scatter_u64base_index_s64(svptrue_b64(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svldnt1_gather_u64base_index_s64(svptrue_b64(), bases, 1.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u64base_index_u64_with_svstnt1_scatter_u64base_index_u64() { + let mut storage = [0 as u64; 160usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svstnt1_scatter_u64base_index_u64(svptrue_b64(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svldnt1_gather_u64base_index_u64(svptrue_b64(), bases, 1.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u32base_offset_f32_with_svstnt1_scatter_u32base_offset_f32() { + let mut storage = [0 as f32; 320usize]; + let data = svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let bases = svindex_u32(0, 4u32.try_into().unwrap()); + svstnt1_scatter_u32base_offset_f32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 + 4u32 as i64, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f32 || val == i as f32); + } + svsetffr(); + let loaded = svldnt1_gather_u32base_offset_f32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 + 4u32 as i64, + ); + assert_vector_matches_f32( + loaded, + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u32base_offset_s32_with_svstnt1_scatter_u32base_offset_s32() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 4u32.try_into().unwrap()); + svstnt1_scatter_u32base_offset_s32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 + 4u32 as i64, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svldnt1_gather_u32base_offset_s32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 + 4u32 as i64, + ); + assert_vector_matches_i32( + loaded, + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u32base_offset_u32_with_svstnt1_scatter_u32base_offset_u32() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 4u32.try_into().unwrap()); + svstnt1_scatter_u32base_offset_u32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 + 4u32 as i64, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = svldnt1_gather_u32base_offset_u32( + svptrue_b32(), + bases, + storage.as_ptr() as i64 + 4u32 as i64, + ); + assert_vector_matches_u32( + loaded, + svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u64base_offset_f64_with_svstnt1_scatter_u64base_offset_f64() { + let mut storage = [0 as f64; 160usize]; + let data = svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svstnt1_scatter_u64base_offset_f64(svptrue_b64(), bases, 8u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as f64 || val == i as f64); + } + svsetffr(); + let loaded = svldnt1_gather_u64base_offset_f64(svptrue_b64(), bases, 8u32.try_into().unwrap()); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u64base_offset_s64_with_svstnt1_scatter_u64base_offset_s64() { + let mut storage = [0 as i64; 160usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svstnt1_scatter_u64base_offset_s64(svptrue_b64(), bases, 8u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i64 || val == i as i64); + } + svsetffr(); + let loaded = svldnt1_gather_u64base_offset_s64(svptrue_b64(), bases, 8u32.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1_gather_u64base_offset_u64_with_svstnt1_scatter_u64base_offset_u64() { + let mut storage = [0 as u64; 160usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svstnt1_scatter_u64base_offset_u64(svptrue_b64(), bases, 8u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u64 || val == i as u64); + } + svsetffr(); + let loaded = svldnt1_gather_u64base_offset_u64(svptrue_b64(), bases, 8u32.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sb_gather_s64offset_s64_with_svstnt1b_scatter_s64offset_s64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 1u32.try_into().unwrap()); + svstnt1b_scatter_s64offset_s64(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = + svldnt1sb_gather_s64offset_s64(svptrue_b8(), storage.as_ptr() as *const i8, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sh_gather_s64offset_s64_with_svstnt1h_scatter_s64offset_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 2u32.try_into().unwrap()); + svstnt1h_scatter_s64offset_s64(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svldnt1sh_gather_s64offset_s64(svptrue_b16(), storage.as_ptr() as *const i16, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sw_gather_s64offset_s64_with_svstnt1w_scatter_s64offset_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 4u32.try_into().unwrap()); + svstnt1w_scatter_s64offset_s64(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = + svldnt1sw_gather_s64offset_s64(svptrue_b32(), storage.as_ptr() as *const i32, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sb_gather_s64offset_u64_with_svstnt1b_scatter_s64offset_u64() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 1u32.try_into().unwrap()); + svstnt1b_scatter_s64offset_u64(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = + svldnt1sb_gather_s64offset_u64(svptrue_b8(), storage.as_ptr() as *const i8, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sh_gather_s64offset_u64_with_svstnt1h_scatter_s64offset_u64() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 2u32.try_into().unwrap()); + svstnt1h_scatter_s64offset_u64(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svldnt1sh_gather_s64offset_u64(svptrue_b16(), storage.as_ptr() as *const i16, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sw_gather_s64offset_u64_with_svstnt1w_scatter_s64offset_u64() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 4u32.try_into().unwrap()); + svstnt1w_scatter_s64offset_u64(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = + svldnt1sw_gather_s64offset_u64(svptrue_b32(), storage.as_ptr() as *const i32, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sb_gather_u32offset_s32_with_svstnt1b_scatter_u32offset_s32() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 1u32.try_into().unwrap()); + svstnt1b_scatter_u32offset_s32(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = + svldnt1sb_gather_u32offset_s32(svptrue_b8(), storage.as_ptr() as *const i8, offsets); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sh_gather_u32offset_s32_with_svstnt1h_scatter_u32offset_s32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 2u32.try_into().unwrap()); + svstnt1h_scatter_u32offset_s32(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svldnt1sh_gather_u32offset_s32(svptrue_b16(), storage.as_ptr() as *const i16, offsets); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sb_gather_u32offset_u32_with_svstnt1b_scatter_u32offset_u32() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 1u32.try_into().unwrap()); + svstnt1b_scatter_u32offset_u32(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = + svldnt1sb_gather_u32offset_u32(svptrue_b8(), storage.as_ptr() as *const i8, offsets); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sh_gather_u32offset_u32_with_svstnt1h_scatter_u32offset_u32() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 2u32.try_into().unwrap()); + svstnt1h_scatter_u32offset_u32(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svldnt1sh_gather_u32offset_u32(svptrue_b16(), storage.as_ptr() as *const i16, offsets); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sb_gather_u64offset_s64_with_svstnt1b_scatter_u64offset_s64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + svstnt1b_scatter_u64offset_s64(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = + svldnt1sb_gather_u64offset_s64(svptrue_b8(), storage.as_ptr() as *const i8, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sh_gather_u64offset_s64_with_svstnt1h_scatter_u64offset_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + svstnt1h_scatter_u64offset_s64(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svldnt1sh_gather_u64offset_s64(svptrue_b16(), storage.as_ptr() as *const i16, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sw_gather_u64offset_s64_with_svstnt1w_scatter_u64offset_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + svstnt1w_scatter_u64offset_s64(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = + svldnt1sw_gather_u64offset_s64(svptrue_b32(), storage.as_ptr() as *const i32, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sb_gather_u64offset_u64_with_svstnt1b_scatter_u64offset_u64() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + svstnt1b_scatter_u64offset_u64(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = + svldnt1sb_gather_u64offset_u64(svptrue_b8(), storage.as_ptr() as *const i8, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sh_gather_u64offset_u64_with_svstnt1h_scatter_u64offset_u64() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + svstnt1h_scatter_u64offset_u64(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svldnt1sh_gather_u64offset_u64(svptrue_b16(), storage.as_ptr() as *const i16, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sw_gather_u64offset_u64_with_svstnt1w_scatter_u64offset_u64() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + svstnt1w_scatter_u64offset_u64(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = + svldnt1sw_gather_u64offset_u64(svptrue_b32(), storage.as_ptr() as *const i32, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sb_gather_u32base_offset_s32_with_svstnt1b_scatter_u32base_offset_s32() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 1u32.try_into().unwrap()); + svstnt1b_scatter_u32base_offset_s32( + svptrue_b8(), + bases, + storage.as_ptr() as i64 + 1u32 as i64, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svldnt1sb_gather_u32base_offset_s32( + svptrue_b8(), + bases, + storage.as_ptr() as i64 + 1u32 as i64, + ); + assert_vector_matches_i32( + loaded, + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sh_gather_u32base_offset_s32_with_svstnt1h_scatter_u32base_offset_s32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 2u32.try_into().unwrap()); + svstnt1h_scatter_u32base_offset_s32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 + 2u32 as i64, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svldnt1sh_gather_u32base_offset_s32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 + 2u32 as i64, + ); + assert_vector_matches_i32( + loaded, + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sb_gather_u32base_offset_u32_with_svstnt1b_scatter_u32base_offset_u32() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 1u32.try_into().unwrap()); + svstnt1b_scatter_u32base_offset_u32( + svptrue_b8(), + bases, + storage.as_ptr() as i64 + 1u32 as i64, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svldnt1sb_gather_u32base_offset_u32( + svptrue_b8(), + bases, + storage.as_ptr() as i64 + 1u32 as i64, + ); + assert_vector_matches_u32( + loaded, + svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sh_gather_u32base_offset_u32_with_svstnt1h_scatter_u32base_offset_u32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 2u32.try_into().unwrap()); + svstnt1h_scatter_u32base_offset_u32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 + 2u32 as i64, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svldnt1sh_gather_u32base_offset_u32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 + 2u32 as i64, + ); + assert_vector_matches_u32( + loaded, + svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sb_gather_u64base_offset_s64_with_svstnt1b_scatter_u64base_offset_s64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b8(), bases, offsets); + svstnt1b_scatter_u64base_offset_s64(svptrue_b8(), bases, 1u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svldnt1sb_gather_u64base_offset_s64(svptrue_b8(), bases, 1u32.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sh_gather_u64base_offset_s64_with_svstnt1h_scatter_u64base_offset_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svstnt1h_scatter_u64base_offset_s64(svptrue_b16(), bases, 2u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svldnt1sh_gather_u64base_offset_s64(svptrue_b16(), bases, 2u32.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sw_gather_u64base_offset_s64_with_svstnt1w_scatter_u64base_offset_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svstnt1w_scatter_u64base_offset_s64(svptrue_b32(), bases, 4u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = + svldnt1sw_gather_u64base_offset_s64(svptrue_b32(), bases, 4u32.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sb_gather_u64base_offset_u64_with_svstnt1b_scatter_u64base_offset_u64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b8(), bases, offsets); + svstnt1b_scatter_u64base_offset_u64(svptrue_b8(), bases, 1u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svldnt1sb_gather_u64base_offset_u64(svptrue_b8(), bases, 1u32.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sh_gather_u64base_offset_u64_with_svstnt1h_scatter_u64base_offset_u64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svstnt1h_scatter_u64base_offset_u64(svptrue_b16(), bases, 2u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svldnt1sh_gather_u64base_offset_u64(svptrue_b16(), bases, 2u32.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sw_gather_u64base_offset_u64_with_svstnt1w_scatter_u64base_offset_u64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svstnt1w_scatter_u64base_offset_u64(svptrue_b32(), bases, 4u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = + svldnt1sw_gather_u64base_offset_u64(svptrue_b32(), bases, 4u32.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sb_gather_u64base_s64_with_svstnt1b_scatter_u64base_s64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b8(), bases, offsets); + svstnt1b_scatter_u64base_s64(svptrue_b8(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svldnt1sb_gather_u64base_s64(svptrue_b8(), bases); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sh_gather_u64base_s64_with_svstnt1h_scatter_u64base_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svstnt1h_scatter_u64base_s64(svptrue_b16(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svldnt1sh_gather_u64base_s64(svptrue_b16(), bases); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sw_gather_u64base_s64_with_svstnt1w_scatter_u64base_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svstnt1w_scatter_u64base_s64(svptrue_b32(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svldnt1sw_gather_u64base_s64(svptrue_b32(), bases); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sb_gather_u64base_u64_with_svstnt1b_scatter_u64base_u64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b8(), bases, offsets); + svstnt1b_scatter_u64base_u64(svptrue_b8(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svldnt1sb_gather_u64base_u64(svptrue_b8(), bases); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sh_gather_u64base_u64_with_svstnt1h_scatter_u64base_u64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svstnt1h_scatter_u64base_u64(svptrue_b16(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svldnt1sh_gather_u64base_u64(svptrue_b16(), bases); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sw_gather_u64base_u64_with_svstnt1w_scatter_u64base_u64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svstnt1w_scatter_u64base_u64(svptrue_b32(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svldnt1sw_gather_u64base_u64(svptrue_b32(), bases); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sh_gather_s64index_s64_with_svstnt1h_scatter_s64index_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s64(0, 1); + svstnt1h_scatter_s64index_s64(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svldnt1sh_gather_s64index_s64(svptrue_b16(), storage.as_ptr() as *const i16, indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sw_gather_s64index_s64_with_svstnt1w_scatter_s64index_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s64(0, 1); + svstnt1w_scatter_s64index_s64(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = + svldnt1sw_gather_s64index_s64(svptrue_b32(), storage.as_ptr() as *const i32, indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sh_gather_s64index_u64_with_svstnt1h_scatter_s64index_u64() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s64(0, 1); + svstnt1h_scatter_s64index_u64(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svldnt1sh_gather_s64index_u64(svptrue_b16(), storage.as_ptr() as *const i16, indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sw_gather_s64index_u64_with_svstnt1w_scatter_s64index_u64() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s64(0, 1); + svstnt1w_scatter_s64index_u64(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = + svldnt1sw_gather_s64index_u64(svptrue_b32(), storage.as_ptr() as *const i32, indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sh_gather_u64index_s64_with_svstnt1h_scatter_u64index_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u64(0, 1); + svstnt1h_scatter_u64index_s64(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svldnt1sh_gather_u64index_s64(svptrue_b16(), storage.as_ptr() as *const i16, indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sw_gather_u64index_s64_with_svstnt1w_scatter_u64index_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u64(0, 1); + svstnt1w_scatter_u64index_s64(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = + svldnt1sw_gather_u64index_s64(svptrue_b32(), storage.as_ptr() as *const i32, indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sh_gather_u64index_u64_with_svstnt1h_scatter_u64index_u64() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u64(0, 1); + svstnt1h_scatter_u64index_u64(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svldnt1sh_gather_u64index_u64(svptrue_b16(), storage.as_ptr() as *const i16, indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sw_gather_u64index_u64_with_svstnt1w_scatter_u64index_u64() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u64(0, 1); + svstnt1w_scatter_u64index_u64(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = + svldnt1sw_gather_u64index_u64(svptrue_b32(), storage.as_ptr() as *const i32, indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sh_gather_u32base_index_s32_with_svstnt1h_scatter_u32base_index_s32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 2u32.try_into().unwrap()); + svstnt1h_scatter_u32base_index_s32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 / (2u32 as i64) + 1, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svldnt1sh_gather_u32base_index_s32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 / (2u32 as i64) + 1, + ); + assert_vector_matches_i32( + loaded, + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sh_gather_u32base_index_u32_with_svstnt1h_scatter_u32base_index_u32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 2u32.try_into().unwrap()); + svstnt1h_scatter_u32base_index_u32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 / (2u32 as i64) + 1, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svldnt1sh_gather_u32base_index_u32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 / (2u32 as i64) + 1, + ); + assert_vector_matches_u32( + loaded, + svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sh_gather_u64base_index_s64_with_svstnt1h_scatter_u64base_index_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svstnt1h_scatter_u64base_index_s64(svptrue_b16(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svldnt1sh_gather_u64base_index_s64(svptrue_b16(), bases, 1.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sw_gather_u64base_index_s64_with_svstnt1w_scatter_u64base_index_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svstnt1w_scatter_u64base_index_s64(svptrue_b32(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svldnt1sw_gather_u64base_index_s64(svptrue_b32(), bases, 1.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sh_gather_u64base_index_u64_with_svstnt1h_scatter_u64base_index_u64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svstnt1h_scatter_u64base_index_u64(svptrue_b16(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svldnt1sh_gather_u64base_index_u64(svptrue_b16(), bases, 1.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1sw_gather_u64base_index_u64_with_svstnt1w_scatter_u64base_index_u64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svstnt1w_scatter_u64base_index_u64(svptrue_b32(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svldnt1sw_gather_u64base_index_u64(svptrue_b32(), bases, 1.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1ub_gather_s64offset_s64_with_svstnt1b_scatter_s64offset_s64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 1u32.try_into().unwrap()); + svstnt1b_scatter_s64offset_s64(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = + svldnt1ub_gather_s64offset_s64(svptrue_b8(), storage.as_ptr() as *const u8, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uh_gather_s64offset_s64_with_svstnt1h_scatter_s64offset_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 2u32.try_into().unwrap()); + svstnt1h_scatter_s64offset_s64(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svldnt1uh_gather_s64offset_s64(svptrue_b16(), storage.as_ptr() as *const u16, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uw_gather_s64offset_s64_with_svstnt1w_scatter_s64offset_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 4u32.try_into().unwrap()); + svstnt1w_scatter_s64offset_s64(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = + svldnt1uw_gather_s64offset_s64(svptrue_b32(), storage.as_ptr() as *const u32, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1ub_gather_s64offset_u64_with_svstnt1b_scatter_s64offset_u64() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 1u32.try_into().unwrap()); + svstnt1b_scatter_s64offset_u64(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = + svldnt1ub_gather_s64offset_u64(svptrue_b8(), storage.as_ptr() as *const u8, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uh_gather_s64offset_u64_with_svstnt1h_scatter_s64offset_u64() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 2u32.try_into().unwrap()); + svstnt1h_scatter_s64offset_u64(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svldnt1uh_gather_s64offset_u64(svptrue_b16(), storage.as_ptr() as *const u16, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uw_gather_s64offset_u64_with_svstnt1w_scatter_s64offset_u64() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_s64(0, 4u32.try_into().unwrap()); + svstnt1w_scatter_s64offset_u64(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = + svldnt1uw_gather_s64offset_u64(svptrue_b32(), storage.as_ptr() as *const u32, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1ub_gather_u32offset_s32_with_svstnt1b_scatter_u32offset_s32() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 1u32.try_into().unwrap()); + svstnt1b_scatter_u32offset_s32(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = + svldnt1ub_gather_u32offset_s32(svptrue_b8(), storage.as_ptr() as *const u8, offsets); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uh_gather_u32offset_s32_with_svstnt1h_scatter_u32offset_s32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 2u32.try_into().unwrap()); + svstnt1h_scatter_u32offset_s32(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svldnt1uh_gather_u32offset_s32(svptrue_b16(), storage.as_ptr() as *const u16, offsets); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1ub_gather_u32offset_u32_with_svstnt1b_scatter_u32offset_u32() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 1u32.try_into().unwrap()); + svstnt1b_scatter_u32offset_u32(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = + svldnt1ub_gather_u32offset_u32(svptrue_b8(), storage.as_ptr() as *const u8, offsets); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uh_gather_u32offset_u32_with_svstnt1h_scatter_u32offset_u32() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u32(0, 2u32.try_into().unwrap()); + svstnt1h_scatter_u32offset_u32(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svldnt1uh_gather_u32offset_u32(svptrue_b16(), storage.as_ptr() as *const u16, offsets); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1ub_gather_u64offset_s64_with_svstnt1b_scatter_u64offset_s64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + svstnt1b_scatter_u64offset_s64(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = + svldnt1ub_gather_u64offset_s64(svptrue_b8(), storage.as_ptr() as *const u8, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uh_gather_u64offset_s64_with_svstnt1h_scatter_u64offset_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + svstnt1h_scatter_u64offset_s64(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svldnt1uh_gather_u64offset_s64(svptrue_b16(), storage.as_ptr() as *const u16, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uw_gather_u64offset_s64_with_svstnt1w_scatter_u64offset_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + svstnt1w_scatter_u64offset_s64(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = + svldnt1uw_gather_u64offset_s64(svptrue_b32(), storage.as_ptr() as *const u32, offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1ub_gather_u64offset_u64_with_svstnt1b_scatter_u64offset_u64() { + let mut storage = [0 as u8; 1280usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + svstnt1b_scatter_u64offset_u64(svptrue_b8(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u8 || val == i as u8); + } + svsetffr(); + let loaded = + svldnt1ub_gather_u64offset_u64(svptrue_b8(), storage.as_ptr() as *const u8, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uh_gather_u64offset_u64_with_svstnt1h_scatter_u64offset_u64() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + svstnt1h_scatter_u64offset_u64(svptrue_b16(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svldnt1uh_gather_u64offset_u64(svptrue_b16(), storage.as_ptr() as *const u16, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uw_gather_u64offset_u64_with_svstnt1w_scatter_u64offset_u64() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + svstnt1w_scatter_u64offset_u64(svptrue_b32(), storage.as_mut_ptr(), offsets, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = + svldnt1uw_gather_u64offset_u64(svptrue_b32(), storage.as_ptr() as *const u32, offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1ub_gather_u32base_offset_s32_with_svstnt1b_scatter_u32base_offset_s32() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 1u32.try_into().unwrap()); + svstnt1b_scatter_u32base_offset_s32( + svptrue_b8(), + bases, + storage.as_ptr() as i64 + 1u32 as i64, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svldnt1ub_gather_u32base_offset_s32( + svptrue_b8(), + bases, + storage.as_ptr() as i64 + 1u32 as i64, + ); + assert_vector_matches_i32( + loaded, + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uh_gather_u32base_offset_s32_with_svstnt1h_scatter_u32base_offset_s32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 2u32.try_into().unwrap()); + svstnt1h_scatter_u32base_offset_s32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 + 2u32 as i64, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svldnt1uh_gather_u32base_offset_s32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 + 2u32 as i64, + ); + assert_vector_matches_i32( + loaded, + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1ub_gather_u32base_offset_u32_with_svstnt1b_scatter_u32base_offset_u32() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 1u32.try_into().unwrap()); + svstnt1b_scatter_u32base_offset_u32( + svptrue_b8(), + bases, + storage.as_ptr() as i64 + 1u32 as i64, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svldnt1ub_gather_u32base_offset_u32( + svptrue_b8(), + bases, + storage.as_ptr() as i64 + 1u32 as i64, + ); + assert_vector_matches_u32( + loaded, + svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uh_gather_u32base_offset_u32_with_svstnt1h_scatter_u32base_offset_u32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 2u32.try_into().unwrap()); + svstnt1h_scatter_u32base_offset_u32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 + 2u32 as i64, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svldnt1uh_gather_u32base_offset_u32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 + 2u32 as i64, + ); + assert_vector_matches_u32( + loaded, + svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1ub_gather_u64base_offset_s64_with_svstnt1b_scatter_u64base_offset_s64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b8(), bases, offsets); + svstnt1b_scatter_u64base_offset_s64(svptrue_b8(), bases, 1u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svldnt1ub_gather_u64base_offset_s64(svptrue_b8(), bases, 1u32.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uh_gather_u64base_offset_s64_with_svstnt1h_scatter_u64base_offset_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svstnt1h_scatter_u64base_offset_s64(svptrue_b16(), bases, 2u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svldnt1uh_gather_u64base_offset_s64(svptrue_b16(), bases, 2u32.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uw_gather_u64base_offset_s64_with_svstnt1w_scatter_u64base_offset_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svstnt1w_scatter_u64base_offset_s64(svptrue_b32(), bases, 4u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = + svldnt1uw_gather_u64base_offset_s64(svptrue_b32(), bases, 4u32.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1ub_gather_u64base_offset_u64_with_svstnt1b_scatter_u64base_offset_u64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b8(), bases, offsets); + svstnt1b_scatter_u64base_offset_u64(svptrue_b8(), bases, 1u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svldnt1ub_gather_u64base_offset_u64(svptrue_b8(), bases, 1u32.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uh_gather_u64base_offset_u64_with_svstnt1h_scatter_u64base_offset_u64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svstnt1h_scatter_u64base_offset_u64(svptrue_b16(), bases, 2u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svldnt1uh_gather_u64base_offset_u64(svptrue_b16(), bases, 2u32.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uw_gather_u64base_offset_u64_with_svstnt1w_scatter_u64base_offset_u64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svstnt1w_scatter_u64base_offset_u64(svptrue_b32(), bases, 4u32.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = + svldnt1uw_gather_u64base_offset_u64(svptrue_b32(), bases, 4u32.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1ub_gather_u64base_s64_with_svstnt1b_scatter_u64base_s64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b8(), bases, offsets); + svstnt1b_scatter_u64base_s64(svptrue_b8(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svldnt1ub_gather_u64base_s64(svptrue_b8(), bases); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uh_gather_u64base_s64_with_svstnt1h_scatter_u64base_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svstnt1h_scatter_u64base_s64(svptrue_b16(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svldnt1uh_gather_u64base_s64(svptrue_b16(), bases); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uw_gather_u64base_s64_with_svstnt1w_scatter_u64base_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svstnt1w_scatter_u64base_s64(svptrue_b32(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svldnt1uw_gather_u64base_s64(svptrue_b32(), bases); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1ub_gather_u64base_u64_with_svstnt1b_scatter_u64base_u64() { + let mut storage = [0 as i8; 1280usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b8(), bases, offsets); + svstnt1b_scatter_u64base_u64(svptrue_b8(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i8 || val == i as i8); + } + svsetffr(); + let loaded = svldnt1ub_gather_u64base_u64(svptrue_b8(), bases); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uh_gather_u64base_u64_with_svstnt1h_scatter_u64base_u64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svstnt1h_scatter_u64base_u64(svptrue_b16(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svldnt1uh_gather_u64base_u64(svptrue_b16(), bases); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uw_gather_u64base_u64_with_svstnt1w_scatter_u64base_u64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svstnt1w_scatter_u64base_u64(svptrue_b32(), bases, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svldnt1uw_gather_u64base_u64(svptrue_b32(), bases); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uh_gather_s64index_s64_with_svstnt1h_scatter_s64index_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s64(0, 1); + svstnt1h_scatter_s64index_s64(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svldnt1uh_gather_s64index_s64(svptrue_b16(), storage.as_ptr() as *const u16, indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uw_gather_s64index_s64_with_svstnt1w_scatter_s64index_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s64(0, 1); + svstnt1w_scatter_s64index_s64(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = + svldnt1uw_gather_s64index_s64(svptrue_b32(), storage.as_ptr() as *const u32, indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uh_gather_s64index_u64_with_svstnt1h_scatter_s64index_u64() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s64(0, 1); + svstnt1h_scatter_s64index_u64(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svldnt1uh_gather_s64index_u64(svptrue_b16(), storage.as_ptr() as *const u16, indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uw_gather_s64index_u64_with_svstnt1w_scatter_s64index_u64() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_s64(0, 1); + svstnt1w_scatter_s64index_u64(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = + svldnt1uw_gather_s64index_u64(svptrue_b32(), storage.as_ptr() as *const u32, indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uh_gather_u64index_s64_with_svstnt1h_scatter_u64index_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u64(0, 1); + svstnt1h_scatter_u64index_s64(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = + svldnt1uh_gather_u64index_s64(svptrue_b16(), storage.as_ptr() as *const u16, indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uw_gather_u64index_s64_with_svstnt1w_scatter_u64index_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u64(0, 1); + svstnt1w_scatter_u64index_s64(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = + svldnt1uw_gather_u64index_s64(svptrue_b32(), storage.as_ptr() as *const u32, indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uh_gather_u64index_u64_with_svstnt1h_scatter_u64index_u64() { + let mut storage = [0 as u16; 640usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u64(0, 1); + svstnt1h_scatter_u64index_u64(svptrue_b16(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u16 || val == i as u16); + } + svsetffr(); + let loaded = + svldnt1uh_gather_u64index_u64(svptrue_b16(), storage.as_ptr() as *const u16, indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uw_gather_u64index_u64_with_svstnt1w_scatter_u64index_u64() { + let mut storage = [0 as u32; 320usize]; + let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let indices = svindex_u64(0, 1); + svstnt1w_scatter_u64index_u64(svptrue_b32(), storage.as_mut_ptr(), indices, data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as u32 || val == i as u32); + } + svsetffr(); + let loaded = + svldnt1uw_gather_u64index_u64(svptrue_b32(), storage.as_ptr() as *const u32, indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uh_gather_u32base_index_s32_with_svstnt1h_scatter_u32base_index_s32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 2u32.try_into().unwrap()); + svstnt1h_scatter_u32base_index_s32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 / (2u32 as i64) + 1, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svldnt1uh_gather_u32base_index_s32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 / (2u32 as i64) + 1, + ); + assert_vector_matches_i32( + loaded, + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uh_gather_u32base_index_u32_with_svstnt1h_scatter_u32base_index_u32() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svindex_u32(0, 2u32.try_into().unwrap()); + svstnt1h_scatter_u32base_index_u32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 / (2u32 as i64) + 1, + data, + ); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svldnt1uh_gather_u32base_index_u32( + svptrue_b16(), + bases, + storage.as_ptr() as i64 / (2u32 as i64) + 1, + ); + assert_vector_matches_u32( + loaded, + svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uh_gather_u64base_index_s64_with_svstnt1h_scatter_u64base_index_s64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svstnt1h_scatter_u64base_index_s64(svptrue_b16(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svldnt1uh_gather_u64base_index_s64(svptrue_b16(), bases, 1.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uw_gather_u64base_index_s64_with_svstnt1w_scatter_u64base_index_s64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svstnt1w_scatter_u64base_index_s64(svptrue_b32(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svldnt1uw_gather_u64base_index_s64(svptrue_b32(), bases, 1.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uh_gather_u64base_index_u64_with_svstnt1h_scatter_u64base_index_u64() { + let mut storage = [0 as i16; 640usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svstnt1h_scatter_u64base_index_u64(svptrue_b16(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i16 || val == i as i16); + } + svsetffr(); + let loaded = svldnt1uh_gather_u64base_index_u64(svptrue_b16(), bases, 1.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve,sve2")] +unsafe fn test_svldnt1uw_gather_u64base_index_u64_with_svstnt1w_scatter_u64base_index_u64() { + let mut storage = [0 as i32; 320usize]; + let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()); + let bases = svdup_n_u64(storage.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svstnt1w_scatter_u64base_index_u64(svptrue_b32(), bases, 1.try_into().unwrap(), data); + for (i, &val) in storage.iter().enumerate() { + assert!(val == 0 as i32 || val == i as i32); + } + svsetffr(); + let loaded = svldnt1uw_gather_u64base_index_u64(svptrue_b32(), bases, 1.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} From 6a6e8446b97e8a3dfc0984660253b1ac437a445a Mon Sep 17 00:00:00 2001 From: David Wood Date: Sat, 28 Feb 2026 21:24:33 +0000 Subject: [PATCH 12/20] intrinsics_data: add sve intrinsics Co-authored-by: Adam Gemmell Co-authored-by: Jamie Cunliffe Co-authored-by: Jacob Bramley Co-authored-by: Luca Vizzarro --- .../intrinsics_data/arm_intrinsics.json | 211216 ++++++++++++++- 1 file changed, 208393 insertions(+), 2823 deletions(-) diff --git a/library/stdarch/intrinsics_data/arm_intrinsics.json b/library/stdarch/intrinsics_data/arm_intrinsics.json index bce85d19a10f..3a3b962a4873 100644 --- a/library/stdarch/intrinsics_data/arm_intrinsics.json +++ b/library/stdarch/intrinsics_data/arm_intrinsics.json @@ -224,21 +224,25 @@ ] }, { - "SIMD_ISA": "Neon", - "name": "vscale_f16", + "SIMD_ISA": "SVE2", + "name": "svaba[_n_s16]", "arguments": [ - "float16x4_t a", - "int16x4_t b" + "svint16_t op1", + "svint16_t op2", + "int16_t op3" ], "return_type": { - "value": "float16x4_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4H" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vm.4H" + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" } }, "Architectures": [ @@ -246,26 +250,165075 @@ ], "instructions": [ [ + "SABA" + ], + [ + "MOVPRFX", + "SABA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaba[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABA" + ], + [ + "MOVPRFX", + "SABA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaba[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "int64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABA" + ], + [ + "MOVPRFX", + "SABA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaba[_n_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABA" + ], + [ + "MOVPRFX", + "SABA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaba[_n_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABA" + ], + [ + "MOVPRFX", + "UABA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaba[_n_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABA" + ], + [ + "MOVPRFX", + "UABA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaba[_n_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABA" + ], + [ + "MOVPRFX", + "UABA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaba[_n_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABA" + ], + [ + "MOVPRFX", + "UABA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaba[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABA" + ], + [ + "MOVPRFX", + "SABA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaba[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABA" + ], + [ + "MOVPRFX", + "SABA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaba[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABA" + ], + [ + "MOVPRFX", + "SABA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaba[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABA" + ], + [ + "MOVPRFX", + "SABA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaba[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABA" + ], + [ + "MOVPRFX", + "UABA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaba[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABA" + ], + [ + "MOVPRFX", + "UABA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaba[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABA" + ], + [ + "MOVPRFX", + "UABA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaba[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABA" + ], + [ + "MOVPRFX", + "UABA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svabalb[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABALB" + ], + [ + "MOVPRFX", + "SABALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svabalb[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABALB" + ], + [ + "MOVPRFX", + "SABALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svabalb[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABALB" + ], + [ + "MOVPRFX", + "SABALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svabalb[_n_u16]", + "arguments": [ + "svuint16_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABALB" + ], + [ + "MOVPRFX", + "UABALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svabalb[_n_u32]", + "arguments": [ + "svuint32_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABALB" + ], + [ + "MOVPRFX", + "UABALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svabalb[_n_u64]", + "arguments": [ + "svuint64_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABALB" + ], + [ + "MOVPRFX", + "UABALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svabalb[_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABALB" + ], + [ + "MOVPRFX", + "SABALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svabalb[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABALB" + ], + [ + "MOVPRFX", + "SABALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svabalb[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABALB" + ], + [ + "MOVPRFX", + "SABALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svabalb[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABALB" + ], + [ + "MOVPRFX", + "UABALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svabalb[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABALB" + ], + [ + "MOVPRFX", + "UABALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svabalb[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABALB" + ], + [ + "MOVPRFX", + "UABALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svabalt[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABALT" + ], + [ + "MOVPRFX", + "SABALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svabalt[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABALT" + ], + [ + "MOVPRFX", + "SABALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svabalt[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABALT" + ], + [ + "MOVPRFX", + "SABALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svabalt[_n_u16]", + "arguments": [ + "svuint16_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABALT" + ], + [ + "MOVPRFX", + "UABALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svabalt[_n_u32]", + "arguments": [ + "svuint32_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABALT" + ], + [ + "MOVPRFX", + "UABALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svabalt[_n_u64]", + "arguments": [ + "svuint64_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABALT" + ], + [ + "MOVPRFX", + "UABALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svabalt[_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABALT" + ], + [ + "MOVPRFX", + "SABALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svabalt[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABALT" + ], + [ + "MOVPRFX", + "SABALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svabalt[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABALT" + ], + [ + "MOVPRFX", + "SABALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svabalt[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABALT" + ], + [ + "MOVPRFX", + "UABALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svabalt[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABALT" + ], + [ + "MOVPRFX", + "UABALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svabalt[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABALT" + ], + [ + "MOVPRFX", + "UABALT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FABD" + ], + [ + "MOVPRFX", + "FABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FABD" + ], + [ + "FABD" + ], + [ + "MOVPRFX", + "FABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FABD" + ], + [ + "MOVPRFX", + "FABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FABD" + ], + [ + "MOVPRFX", + "FABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FABD" + ], + [ + "FABD" + ], + [ + "MOVPRFX", + "FABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FABD" + ], + [ + "MOVPRFX", + "FABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FABD" + ], + [ + "MOVPRFX", + "FABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FABD" + ], + [ + "FABD" + ], + [ + "MOVPRFX", + "FABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FABD" + ], + [ + "MOVPRFX", + "FABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FABD" + ], + [ + "MOVPRFX", + "FABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FABD" + ], + [ + "FABD" + ], + [ + "MOVPRFX", + "FABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FABD" + ], + [ + "MOVPRFX", + "FABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FABD" + ], + [ + "MOVPRFX", + "FABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FABD" + ], + [ + "FABD" + ], + [ + "MOVPRFX", + "FABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FABD" + ], + [ + "MOVPRFX", + "FABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FABD" + ], + [ + "MOVPRFX", + "FABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FABD" + ], + [ + "FABD" + ], + [ + "MOVPRFX", + "FABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FABD" + ], + [ + "MOVPRFX", + "FABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABD" + ], + [ + "MOVPRFX", + "SABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABD" + ], + [ + "SABD" + ], + [ + "MOVPRFX", + "SABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SABD" + ], + [ + "MOVPRFX", + "SABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABD" + ], + [ + "MOVPRFX", + "SABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABD" + ], + [ + "SABD" + ], + [ + "MOVPRFX", + "SABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SABD" + ], + [ + "MOVPRFX", + "SABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABD" + ], + [ + "MOVPRFX", + "SABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABD" + ], + [ + "SABD" + ], + [ + "MOVPRFX", + "SABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SABD" + ], + [ + "MOVPRFX", + "SABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABD" + ], + [ + "MOVPRFX", + "SABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABD" + ], + [ + "SABD" + ], + [ + "MOVPRFX", + "SABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SABD" + ], + [ + "MOVPRFX", + "SABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABD" + ], + [ + "MOVPRFX", + "UABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABD" + ], + [ + "UABD" + ], + [ + "MOVPRFX", + "UABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UABD" + ], + [ + "MOVPRFX", + "UABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABD" + ], + [ + "MOVPRFX", + "UABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABD" + ], + [ + "UABD" + ], + [ + "MOVPRFX", + "UABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UABD" + ], + [ + "MOVPRFX", + "UABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABD" + ], + [ + "MOVPRFX", + "UABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABD" + ], + [ + "UABD" + ], + [ + "MOVPRFX", + "UABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UABD" + ], + [ + "MOVPRFX", + "UABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABD" + ], + [ + "MOVPRFX", + "UABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABD" + ], + [ + "UABD" + ], + [ + "MOVPRFX", + "UABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UABD" + ], + [ + "MOVPRFX", + "UABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABD" + ], + [ + "MOVPRFX", + "SABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABD" + ], + [ + "SABD" + ], + [ + "MOVPRFX", + "SABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SABD" + ], + [ + "MOVPRFX", + "SABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABD" + ], + [ + "MOVPRFX", + "SABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABD" + ], + [ + "SABD" + ], + [ + "MOVPRFX", + "SABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SABD" + ], + [ + "MOVPRFX", + "SABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABD" + ], + [ + "MOVPRFX", + "SABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABD" + ], + [ + "SABD" + ], + [ + "MOVPRFX", + "SABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SABD" + ], + [ + "MOVPRFX", + "SABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABD" + ], + [ + "MOVPRFX", + "SABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABD" + ], + [ + "SABD" + ], + [ + "MOVPRFX", + "SABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SABD" + ], + [ + "MOVPRFX", + "SABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABD" + ], + [ + "MOVPRFX", + "UABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABD" + ], + [ + "UABD" + ], + [ + "MOVPRFX", + "UABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UABD" + ], + [ + "MOVPRFX", + "UABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABD" + ], + [ + "MOVPRFX", + "UABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABD" + ], + [ + "UABD" + ], + [ + "MOVPRFX", + "UABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UABD" + ], + [ + "MOVPRFX", + "UABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABD" + ], + [ + "MOVPRFX", + "UABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABD" + ], + [ + "UABD" + ], + [ + "MOVPRFX", + "UABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UABD" + ], + [ + "MOVPRFX", + "UABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABD" + ], + [ + "MOVPRFX", + "UABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABD" + ], + [ + "UABD" + ], + [ + "MOVPRFX", + "UABD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabd[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UABD" + ], + [ + "MOVPRFX", + "UABD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svabdlb[_n_s16]", + "arguments": [ + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABDLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svabdlb[_n_s32]", + "arguments": [ + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABDLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svabdlb[_n_s64]", + "arguments": [ + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABDLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svabdlb[_n_u16]", + "arguments": [ + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABDLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svabdlb[_n_u32]", + "arguments": [ + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABDLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svabdlb[_n_u64]", + "arguments": [ + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABDLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svabdlb[_s16]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABDLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svabdlb[_s32]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABDLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svabdlb[_s64]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABDLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svabdlb[_u16]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABDLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svabdlb[_u32]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABDLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svabdlb[_u64]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABDLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svabdlt[_n_s16]", + "arguments": [ + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABDLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svabdlt[_n_s32]", + "arguments": [ + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABDLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svabdlt[_n_s64]", + "arguments": [ + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABDLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svabdlt[_n_u16]", + "arguments": [ + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABDLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svabdlt[_n_u32]", + "arguments": [ + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABDLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svabdlt[_n_u64]", + "arguments": [ + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABDLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svabdlt[_s16]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABDLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svabdlt[_s32]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABDLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svabdlt[_s64]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SABDLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svabdlt[_u16]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABDLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svabdlt[_u32]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABDLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svabdlt[_u64]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UABDLT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabs[_f16]_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FABS" + ], + [ + "MOVPRFX", + "FABS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabs[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FABS" + ], + [ + "MOVPRFX", + "FABS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabs[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FABS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabs[_f32]_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FABS" + ], + [ + "MOVPRFX", + "FABS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabs[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FABS" + ], + [ + "MOVPRFX", + "FABS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabs[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FABS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabs[_f64]_m", + "arguments": [ + "svfloat64_t inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FABS" + ], + [ + "MOVPRFX", + "FABS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabs[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FABS" + ], + [ + "MOVPRFX", + "FABS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabs[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FABS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabs[_s16]_m", + "arguments": [ + "svint16_t inactive", + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ABS" + ], + [ + "MOVPRFX", + "ABS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabs[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ABS" + ], + [ + "MOVPRFX", + "ABS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabs[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ABS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabs[_s32]_m", + "arguments": [ + "svint32_t inactive", + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ABS" + ], + [ + "MOVPRFX", + "ABS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabs[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ABS" + ], + [ + "MOVPRFX", + "ABS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabs[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ABS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabs[_s64]_m", + "arguments": [ + "svint64_t inactive", + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ABS" + ], + [ + "MOVPRFX", + "ABS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabs[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ABS" + ], + [ + "MOVPRFX", + "ABS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabs[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ABS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabs[_s8]_m", + "arguments": [ + "svint8_t inactive", + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.B|Ztied.B" + }, + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ABS" + ], + [ + "MOVPRFX", + "ABS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabs[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B|Ztied.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ABS" + ], + [ + "MOVPRFX", + "ABS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svabs[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ABS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svacge[_f16]", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svacge[_f32]", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svacge[_f64]", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svacge[_n_f16]", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svacge[_n_f32]", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svacge[_n_f64]", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svacgt[_f16]", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svacgt[_f32]", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svacgt[_f64]", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svacgt[_n_f16]", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svacgt[_n_f32]", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svacgt[_n_f64]", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svacle[_f16]", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svacle[_f32]", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svacle[_f64]", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svacle[_n_f16]", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svacle[_n_f32]", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svacle[_n_f64]", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svaclt[_f16]", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svaclt[_f32]", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svaclt[_f64]", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svaclt[_n_f16]", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svaclt[_n_f32]", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svaclt[_n_f64]", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FACGT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svadalp[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADALP" + ], + [ + "MOVPRFX", + "SADALP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svadalp[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADALP" + ], + [ + "MOVPRFX", + "SADALP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svadalp[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SADALP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svadalp[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADALP" + ], + [ + "MOVPRFX", + "SADALP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svadalp[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADALP" + ], + [ + "MOVPRFX", + "SADALP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svadalp[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SADALP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svadalp[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADALP" + ], + [ + "MOVPRFX", + "SADALP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svadalp[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADALP" + ], + [ + "MOVPRFX", + "SADALP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svadalp[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SADALP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svadalp[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADALP" + ], + [ + "MOVPRFX", + "UADALP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svadalp[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADALP" + ], + [ + "MOVPRFX", + "UADALP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svadalp[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UADALP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svadalp[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADALP" + ], + [ + "MOVPRFX", + "UADALP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svadalp[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADALP" + ], + [ + "MOVPRFX", + "UADALP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svadalp[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UADALP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svadalp[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADALP" + ], + [ + "MOVPRFX", + "UADALP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svadalp[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADALP" + ], + [ + "MOVPRFX", + "UADALP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svadalp[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UADALP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svadclb[_n_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADCLB" + ], + [ + "MOVPRFX", + "ADCLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svadclb[_n_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADCLB" + ], + [ + "MOVPRFX", + "ADCLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svadclb[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADCLB" + ], + [ + "MOVPRFX", + "ADCLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svadclb[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADCLB" + ], + [ + "MOVPRFX", + "ADCLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svadclt[_n_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADCLT" + ], + [ + "MOVPRFX", + "ADCLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svadclt[_n_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADCLT" + ], + [ + "MOVPRFX", + "ADCLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svadclt[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADCLT" + ], + [ + "MOVPRFX", + "ADCLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svadclt[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADCLT" + ], + [ + "MOVPRFX", + "ADCLT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FADD" + ], + [ + "MOVPRFX", + "FADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FADD" + ], + [ + "FADD" + ], + [ + "FADD" + ], + [ + "MOVPRFX", + "FADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FADD" + ], + [ + "MOVPRFX", + "FADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FADD" + ], + [ + "MOVPRFX", + "FADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FADD" + ], + [ + "FADD" + ], + [ + "FADD" + ], + [ + "MOVPRFX", + "FADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FADD" + ], + [ + "MOVPRFX", + "FADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FADD" + ], + [ + "MOVPRFX", + "FADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FADD" + ], + [ + "FADD" + ], + [ + "FADD" + ], + [ + "MOVPRFX", + "FADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FADD" + ], + [ + "MOVPRFX", + "FADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FADD" + ], + [ + "FSUB" + ], + [ + "FADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FADD" + ], + [ + "FSUB" + ], + [ + "FADD" + ], + [ + "FADD" + ], + [ + "FADD" + ], + [ + "MOVPRFX", + "FADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FADD" + ], + [ + "MOVPRFX", + "FSUB" + ], + [ + "MOVPRFX", + "FADD" + ], + [ + "MOVPRFX", + "FADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FADD" + ], + [ + "FSUB" + ], + [ + "FADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FADD" + ], + [ + "FSUB" + ], + [ + "FADD" + ], + [ + "FADD" + ], + [ + "FADD" + ], + [ + "MOVPRFX", + "FADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FADD" + ], + [ + "MOVPRFX", + "FSUB" + ], + [ + "MOVPRFX", + "FADD" + ], + [ + "MOVPRFX", + "FADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FADD" + ], + [ + "FSUB" + ], + [ + "FADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FADD" + ], + [ + "FSUB" + ], + [ + "FADD" + ], + [ + "FADD" + ], + [ + "FADD" + ], + [ + "MOVPRFX", + "FADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FADD" + ], + [ + "MOVPRFX", + "FSUB" + ], + [ + "MOVPRFX", + "FADD" + ], + [ + "MOVPRFX", + "FADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADD" + ], + [ + "MOVPRFX", + "ADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADD" + ], + [ + "SUB" + ], + [ + "ADD" + ], + [ + "ADD" + ], + [ + "ADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ADD" + ], + [ + "MOVPRFX", + "ADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADD" + ], + [ + "MOVPRFX", + "ADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADD" + ], + [ + "SUB" + ], + [ + "ADD" + ], + [ + "ADD" + ], + [ + "ADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ADD" + ], + [ + "MOVPRFX", + "ADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADD" + ], + [ + "MOVPRFX", + "ADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADD" + ], + [ + "SUB" + ], + [ + "ADD" + ], + [ + "ADD" + ], + [ + "ADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ADD" + ], + [ + "MOVPRFX", + "ADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADD" + ], + [ + "MOVPRFX", + "ADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADD" + ], + [ + "SUB" + ], + [ + "ADD" + ], + [ + "ADD" + ], + [ + "ADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ADD" + ], + [ + "MOVPRFX", + "ADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADD" + ], + [ + "MOVPRFX", + "ADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADD" + ], + [ + "SUB" + ], + [ + "ADD" + ], + [ + "ADD" + ], + [ + "ADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ADD" + ], + [ + "MOVPRFX", + "ADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADD" + ], + [ + "MOVPRFX", + "ADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADD" + ], + [ + "SUB" + ], + [ + "ADD" + ], + [ + "ADD" + ], + [ + "ADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ADD" + ], + [ + "MOVPRFX", + "ADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADD" + ], + [ + "MOVPRFX", + "ADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADD" + ], + [ + "SUB" + ], + [ + "ADD" + ], + [ + "ADD" + ], + [ + "ADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ADD" + ], + [ + "MOVPRFX", + "ADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADD" + ], + [ + "MOVPRFX", + "ADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADD" + ], + [ + "SUB" + ], + [ + "ADD" + ], + [ + "ADD" + ], + [ + "ADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ADD" + ], + [ + "MOVPRFX", + "ADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADD" + ], + [ + "MOVPRFX", + "ADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADD" + ], + [ + "ADD" + ], + [ + "ADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ADD" + ], + [ + "MOVPRFX", + "ADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADD" + ], + [ + "MOVPRFX", + "ADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADD" + ], + [ + "ADD" + ], + [ + "ADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ADD" + ], + [ + "MOVPRFX", + "ADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADD" + ], + [ + "MOVPRFX", + "ADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADD" + ], + [ + "ADD" + ], + [ + "ADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ADD" + ], + [ + "MOVPRFX", + "ADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADD" + ], + [ + "MOVPRFX", + "ADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADD" + ], + [ + "ADD" + ], + [ + "ADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ADD" + ], + [ + "MOVPRFX", + "ADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADD" + ], + [ + "MOVPRFX", + "ADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADD" + ], + [ + "ADD" + ], + [ + "ADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ADD" + ], + [ + "MOVPRFX", + "ADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADD" + ], + [ + "MOVPRFX", + "ADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADD" + ], + [ + "ADD" + ], + [ + "ADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ADD" + ], + [ + "MOVPRFX", + "ADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADD" + ], + [ + "MOVPRFX", + "ADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADD" + ], + [ + "ADD" + ], + [ + "ADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ADD" + ], + [ + "MOVPRFX", + "ADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADD" + ], + [ + "MOVPRFX", + "ADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADD" + ], + [ + "ADD" + ], + [ + "ADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadd[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ADD" + ], + [ + "MOVPRFX", + "ADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadda[_f16]", + "arguments": [ + "svbool_t pg", + "float16_t initial", + "svfloat16_t op" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "initial": { + "register": "Htied" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FADDA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadda[_f32]", + "arguments": [ + "svbool_t pg", + "float32_t initial", + "svfloat32_t op" + ], + "return_type": { + "value": "float32_t" + }, + "Arguments_Preparation": { + "initial": { + "register": "Stied" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FADDA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadda[_f64]", + "arguments": [ + "svbool_t pg", + "float64_t initial", + "svfloat64_t op" + ], + "return_type": { + "value": "float64_t" + }, + "Arguments_Preparation": { + "initial": { + "register": "Dtied" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FADDA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddhnb[_n_s16]", + "arguments": [ + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddhnb[_n_s32]", + "arguments": [ + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddhnb[_n_s64]", + "arguments": [ + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddhnb[_n_u16]", + "arguments": [ + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddhnb[_n_u32]", + "arguments": [ + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddhnb[_n_u64]", + "arguments": [ + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddhnb[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddhnb[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddhnb[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddhnb[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddhnb[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddhnb[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddhnt[_n_s16]", + "arguments": [ + "svint8_t even", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddhnt[_n_s32]", + "arguments": [ + "svint16_t even", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddhnt[_n_s64]", + "arguments": [ + "svint32_t even", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddhnt[_n_u16]", + "arguments": [ + "svuint8_t even", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddhnt[_n_u32]", + "arguments": [ + "svuint16_t even", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddhnt[_n_u64]", + "arguments": [ + "svuint32_t even", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddhnt[_s16]", + "arguments": [ + "svint8_t even", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddhnt[_s32]", + "arguments": [ + "svint16_t even", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddhnt[_s64]", + "arguments": [ + "svint32_t even", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddhnt[_u16]", + "arguments": [ + "svuint8_t even", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddhnt[_u32]", + "arguments": [ + "svuint16_t even", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddhnt[_u64]", + "arguments": [ + "svuint32_t even", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddlb[_n_s16]", + "arguments": [ + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddlb[_n_s32]", + "arguments": [ + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddlb[_n_s64]", + "arguments": [ + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddlb[_n_u16]", + "arguments": [ + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADDLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddlb[_n_u32]", + "arguments": [ + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADDLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddlb[_n_u64]", + "arguments": [ + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADDLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddlb[_s16]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddlb[_s32]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddlb[_s64]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddlb[_u16]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADDLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddlb[_u32]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADDLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddlb[_u64]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADDLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddlbt[_n_s16]", + "arguments": [ + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDLBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddlbt[_n_s32]", + "arguments": [ + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDLBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddlbt[_n_s64]", + "arguments": [ + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDLBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddlbt[_s16]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDLBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddlbt[_s32]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDLBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddlbt[_s64]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDLBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddlt[_n_s16]", + "arguments": [ + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddlt[_n_s32]", + "arguments": [ + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddlt[_n_s64]", + "arguments": [ + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddlt[_n_u16]", + "arguments": [ + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADDLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddlt[_n_u32]", + "arguments": [ + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADDLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddlt[_n_u64]", + "arguments": [ + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADDLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddlt[_s16]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddlt[_s32]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddlt[_s64]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddlt[_u16]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADDLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddlt[_u32]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADDLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddlt[_u64]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADDLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddp[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FADDP" + ], + [ + "MOVPRFX", + "FADDP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddp[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FADDP" + ], + [ + "MOVPRFX", + "FADDP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddp[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FADDP" + ], + [ + "MOVPRFX", + "FADDP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddp[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FADDP" + ], + [ + "MOVPRFX", + "FADDP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddp[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FADDP" + ], + [ + "MOVPRFX", + "FADDP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddp[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FADDP" + ], + [ + "MOVPRFX", + "FADDP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddp[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDP" + ], + [ + "MOVPRFX", + "ADDP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddp[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDP" + ], + [ + "MOVPRFX", + "ADDP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddp[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDP" + ], + [ + "MOVPRFX", + "ADDP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddp[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDP" + ], + [ + "MOVPRFX", + "ADDP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddp[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDP" + ], + [ + "MOVPRFX", + "ADDP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddp[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDP" + ], + [ + "MOVPRFX", + "ADDP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddp[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDP" + ], + [ + "MOVPRFX", + "ADDP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddp[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDP" + ], + [ + "MOVPRFX", + "ADDP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddp[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDP" + ], + [ + "MOVPRFX", + "ADDP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddp[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDP" + ], + [ + "MOVPRFX", + "ADDP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddp[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDP" + ], + [ + "MOVPRFX", + "ADDP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddp[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDP" + ], + [ + "MOVPRFX", + "ADDP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddp[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDP" + ], + [ + "MOVPRFX", + "ADDP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddp[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDP" + ], + [ + "MOVPRFX", + "ADDP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddp[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDP" + ], + [ + "MOVPRFX", + "ADDP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddp[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADDP" + ], + [ + "MOVPRFX", + "ADDP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svaddv[_f16]", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FADDV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svaddv[_f32]", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "float32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FADDV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svaddv[_f64]", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "float64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FADDV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svaddv[_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svaddv[_s32]", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svaddv[_s64]", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADDV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svaddv[_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svaddv[_u16]", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADDV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svaddv[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADDV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svaddv[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADDV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svaddv[_u8]", + "arguments": [ + "svbool_t pg", + "svuint8_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADDV" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddwb[_n_s16]", + "arguments": [ + "svint16_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDWB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddwb[_n_s32]", + "arguments": [ + "svint32_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDWB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddwb[_n_s64]", + "arguments": [ + "svint64_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDWB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddwb[_n_u16]", + "arguments": [ + "svuint16_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADDWB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddwb[_n_u32]", + "arguments": [ + "svuint32_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADDWB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddwb[_n_u64]", + "arguments": [ + "svuint64_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADDWB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddwb[_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDWB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddwb[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDWB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddwb[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDWB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddwb[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADDWB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddwb[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADDWB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddwb[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADDWB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddwt[_n_s16]", + "arguments": [ + "svint16_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDWT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddwt[_n_s32]", + "arguments": [ + "svint32_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDWT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddwt[_n_s64]", + "arguments": [ + "svint64_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDWT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddwt[_n_u16]", + "arguments": [ + "svuint16_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADDWT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddwt[_n_u32]", + "arguments": [ + "svuint32_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADDWT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddwt[_n_u64]", + "arguments": [ + "svuint64_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADDWT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddwt[_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDWT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddwt[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDWT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddwt[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SADDWT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddwt[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADDWT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddwt[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADDWT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaddwt[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UADDWT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadrb[_u32base]_[s32]offset", + "arguments": [ + "svuint32_t bases", + "svint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offsets": { + "register": "Zoffsets.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadrb[_u32base]_[u32]offset", + "arguments": [ + "svuint32_t bases", + "svuint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offsets": { + "register": "Zoffsets.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadrb[_u64base]_[s64]offset", + "arguments": [ + "svuint64_t bases", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offsets": { + "register": "Zoffsets.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadrb[_u64base]_[u64]offset", + "arguments": [ + "svuint64_t bases", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offsets": { + "register": "Zoffsets.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadrd[_u32base]_[s32]index", + "arguments": [ + "svuint32_t bases", + "svint32_t indices" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "indices": { + "register": "Zindices.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadrd[_u32base]_[u32]index", + "arguments": [ + "svuint32_t bases", + "svuint32_t indices" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "indices": { + "register": "Zindices.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadrd[_u64base]_[s64]index", + "arguments": [ + "svuint64_t bases", + "svint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "indices": { + "register": "Zindices.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadrd[_u64base]_[u64]index", + "arguments": [ + "svuint64_t bases", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "indices": { + "register": "Zindices.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadrh[_u32base]_[s32]index", + "arguments": [ + "svuint32_t bases", + "svint32_t indices" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "indices": { + "register": "Zindices.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadrh[_u32base]_[u32]index", + "arguments": [ + "svuint32_t bases", + "svuint32_t indices" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "indices": { + "register": "Zindices.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadrh[_u64base]_[s64]index", + "arguments": [ + "svuint64_t bases", + "svint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "indices": { + "register": "Zindices.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadrh[_u64base]_[u64]index", + "arguments": [ + "svuint64_t bases", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "indices": { + "register": "Zindices.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadrw[_u32base]_[s32]index", + "arguments": [ + "svuint32_t bases", + "svint32_t indices" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "indices": { + "register": "Zindices.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadrw[_u32base]_[u32]index", + "arguments": [ + "svuint32_t bases", + "svuint32_t indices" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "indices": { + "register": "Zindices.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadrw[_u64base]_[s64]index", + "arguments": [ + "svuint64_t bases", + "svint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "indices": { + "register": "Zindices.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svadrw[_u64base]_[u64]index", + "arguments": [ + "svuint64_t bases", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "indices": { + "register": "Zindices.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ADR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaesd[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "AESD" + ], + [ + "AESD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaese[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "AESE" + ], + [ + "AESE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaesimc[_u8]", + "arguments": [ + "svuint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Ztied.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "AESIMC" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svaesmc[_u8]", + "arguments": [ + "svuint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Ztied.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "AESMC" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svand[_b]_z", + "arguments": [ + "svbool_t pg", + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.B" + }, + "op2": { + "register": "Pop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svand[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UXTB" + ], + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svand[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "AND" + ], + [ + "AND" + ], + [ + "AND" + ], + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svand[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UXTB" + ], + [ + "MOVPRFX", + "AND" + ], + [ + "MOVPRFX", + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svand[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UXTB" + ], + [ + "UXTH" + ], + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svand[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "AND" + ], + [ + "AND" + ], + [ + "AND" + ], + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svand[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UXTB" + ], + [ + "MOVPRFX", + "UXTH" + ], + [ + "MOVPRFX", + "AND" + ], + [ + "MOVPRFX", + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svand[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UXTB" + ], + [ + "UXTH" + ], + [ + "UXTW" + ], + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svand[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "AND" + ], + [ + "AND" + ], + [ + "AND" + ], + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svand[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UXTB" + ], + [ + "MOVPRFX", + "UXTH" + ], + [ + "MOVPRFX", + "UXTW" + ], + [ + "MOVPRFX", + "AND" + ], + [ + "MOVPRFX", + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svand[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "AND" + ], + [ + "MOVPRFX", + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svand[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "AND" + ], + [ + "AND" + ], + [ + "AND" + ], + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svand[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "AND" + ], + [ + "MOVPRFX", + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svand[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UXTB" + ], + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svand[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "AND" + ], + [ + "AND" + ], + [ + "AND" + ], + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svand[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UXTB" + ], + [ + "MOVPRFX", + "AND" + ], + [ + "MOVPRFX", + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svand[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UXTB" + ], + [ + "UXTH" + ], + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svand[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "AND" + ], + [ + "AND" + ], + [ + "AND" + ], + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svand[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UXTB" + ], + [ + "MOVPRFX", + "UXTH" + ], + [ + "MOVPRFX", + "AND" + ], + [ + "MOVPRFX", + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svand[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UXTB" + ], + [ + "UXTH" + ], + [ + "UXTW" + ], + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svand[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "AND" + ], + [ + "AND" + ], + [ + "AND" + ], + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svand[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UXTB" + ], + [ + "MOVPRFX", + "UXTH" + ], + [ + "MOVPRFX", + "UXTW" + ], + [ + "MOVPRFX", + "AND" + ], + [ + "MOVPRFX", + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svand[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "AND" + ], + [ + "MOVPRFX", + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svand[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "AND" + ], + [ + "AND" + ], + [ + "AND" + ], + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svand[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "AND" + ], + [ + "MOVPRFX", + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svand[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "AND" + ], + [ + "MOVPRFX", + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svand[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "AND" + ], + [ + "AND" + ], + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svand[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "AND" + ], + [ + "MOVPRFX", + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svand[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "AND" + ], + [ + "MOVPRFX", + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svand[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "AND" + ], + [ + "AND" + ], + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svand[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "AND" + ], + [ + "MOVPRFX", + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svand[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "AND" + ], + [ + "MOVPRFX", + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svand[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "AND" + ], + [ + "AND" + ], + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svand[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "AND" + ], + [ + "MOVPRFX", + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svand[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "AND" + ], + [ + "MOVPRFX", + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svand[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "AND" + ], + [ + "AND" + ], + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svand[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "AND" + ], + [ + "MOVPRFX", + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svand[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "AND" + ], + [ + "MOVPRFX", + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svand[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "AND" + ], + [ + "AND" + ], + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svand[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "AND" + ], + [ + "MOVPRFX", + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svand[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "AND" + ], + [ + "MOVPRFX", + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svand[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "AND" + ], + [ + "AND" + ], + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svand[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "AND" + ], + [ + "MOVPRFX", + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svand[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "AND" + ], + [ + "MOVPRFX", + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svand[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "AND" + ], + [ + "AND" + ], + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svand[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "AND" + ], + [ + "MOVPRFX", + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svand[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "AND" + ], + [ + "MOVPRFX", + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svand[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "AND" + ], + [ + "AND" + ], + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svand[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "AND" + ], + [ + "MOVPRFX", + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svandv[_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "int16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ANDV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svandv[_s32]", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ANDV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svandv[_s64]", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ANDV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svandv[_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "int8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ANDV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svandv[_u16]", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ANDV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svandv[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ANDV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svandv[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ANDV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svandv[_u8]", + "arguments": [ + "svbool_t pg", + "svuint8_t op" + ], + "return_type": { + "value": "uint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ANDV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svasr[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ASR" + ], + [ + "ASR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svasr[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ASR" + ], + [ + "ASR" + ], + [ + "ASRR" + ], + [ + "MOVPRFX", + "ASR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svasr[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", + "ASRR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svasr[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ASR" + ], + [ + "ASR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svasr[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ASR" + ], + [ + "ASR" + ], + [ + "ASRR" + ], + [ + "MOVPRFX", + "ASR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svasr[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", + "ASRR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svasr[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ASR" + ], + [ + "ASR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svasr[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ASR" + ], + [ + "ASR" + ], + [ + "ASRR" + ], + [ + "MOVPRFX", + "ASR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svasr[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", + "ASRR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svasr[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ASR" + ], + [ + "ASR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svasr[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ASR" + ], + [ + "ASR" + ], + [ + "ASRR" + ], + [ + "MOVPRFX", + "ASR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svasr[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", + "ASRR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svasr[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ASR" + ], + [ + "MOVPRFX", + "ASR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svasr[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ASR" + ], + [ + "ASRR" + ], + [ + "MOVPRFX", + "ASR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svasr[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", + "ASRR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svasr[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ASR" + ], + [ + "MOVPRFX", + "ASR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svasr[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ASR" + ], + [ + "ASRR" + ], + [ + "MOVPRFX", + "ASR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svasr[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", + "ASRR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svasr[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ASR" + ], + [ + "MOVPRFX", + "ASR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svasr[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ASR" + ], + [ + "ASRR" + ], + [ + "MOVPRFX", + "ASR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svasr[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", + "ASRR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svasr[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ASR" + ], + [ + "MOVPRFX", + "ASR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svasr[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ASR" + ], + [ + "ASRR" + ], + [ + "MOVPRFX", + "ASR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svasr[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", + "ASRR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svasr_wide[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ASR" + ], + [ + "ASR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svasr_wide[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ASR" + ], + [ + "ASR" + ], + [ + "ASR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svasr_wide[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", + "ASR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svasr_wide[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ASR" + ], + [ + "ASR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svasr_wide[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ASR" + ], + [ + "ASR" + ], + [ + "ASR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svasr_wide[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", + "ASR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svasr_wide[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ASR" + ], + [ + "ASR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svasr_wide[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ASR" + ], + [ + "ASR" + ], + [ + "ASR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svasr_wide[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", + "ASR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svasr_wide[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ASR" + ], + [ + "MOVPRFX", + "ASR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svasr_wide[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ASR" + ], + [ + "ASR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svasr_wide[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ASR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svasr_wide[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ASR" + ], + [ + "MOVPRFX", + "ASR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svasr_wide[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ASR" + ], + [ + "ASR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svasr_wide[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ASR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svasr_wide[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ASR" + ], + [ + "MOVPRFX", + "ASR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svasr_wide[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ASR" + ], + [ + "ASR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svasr_wide[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ASR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svasrd[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ASRD" + ], + [ + "MOVPRFX", + "ASRD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svasrd[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ASRD" + ], + [ + "MOVPRFX", + "ASRD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svasrd[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ASRD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svasrd[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ASRD" + ], + [ + "MOVPRFX", + "ASRD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svasrd[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ASRD" + ], + [ + "MOVPRFX", + "ASRD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svasrd[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ASRD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svasrd[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 64 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ASRD" + ], + [ + "MOVPRFX", + "ASRD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svasrd[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 64 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ASRD" + ], + [ + "MOVPRFX", + "ASRD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svasrd[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 64 + }, + "op1": { + "register": "Zop1.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ASRD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svasrd[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ASRD" + ], + [ + "MOVPRFX", + "ASRD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svasrd[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ASRD" + ], + [ + "MOVPRFX", + "ASRD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svasrd[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ASRD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbcax[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BCAX" + ], + [ + "MOVPRFX", + "BCAX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbcax[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BCAX" + ], + [ + "MOVPRFX", + "BCAX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbcax[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "int64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BCAX" + ], + [ + "MOVPRFX", + "BCAX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbcax[_n_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BCAX" + ], + [ + "MOVPRFX", + "BCAX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbcax[_n_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BCAX" + ], + [ + "MOVPRFX", + "BCAX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbcax[_n_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BCAX" + ], + [ + "MOVPRFX", + "BCAX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbcax[_n_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BCAX" + ], + [ + "MOVPRFX", + "BCAX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbcax[_n_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BCAX" + ], + [ + "MOVPRFX", + "BCAX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbcax[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BCAX" + ], + [ + "MOVPRFX", + "BCAX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbcax[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BCAX" + ], + [ + "MOVPRFX", + "BCAX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbcax[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BCAX" + ], + [ + "MOVPRFX", + "BCAX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbcax[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BCAX" + ], + [ + "MOVPRFX", + "BCAX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbcax[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BCAX" + ], + [ + "MOVPRFX", + "BCAX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbcax[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BCAX" + ], + [ + "MOVPRFX", + "BCAX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbcax[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BCAX" + ], + [ + "MOVPRFX", + "BCAX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbcax[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BCAX" + ], + [ + "MOVPRFX", + "BCAX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbdep[_n_u16]", + "arguments": [ + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BDEP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbdep[_n_u32]", + "arguments": [ + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BDEP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbdep[_n_u64]", + "arguments": [ + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BDEP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbdep[_n_u8]", + "arguments": [ + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BDEP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbdep[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BDEP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbdep[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BDEP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbdep[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BDEP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbdep[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BDEP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbext[_n_u16]", + "arguments": [ + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BEXT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbext[_n_u32]", + "arguments": [ + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BEXT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbext[_n_u64]", + "arguments": [ + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BEXT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbext[_n_u8]", + "arguments": [ + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BEXT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbext[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BEXT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbext[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BEXT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbext[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BEXT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbext[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BEXT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbgrp[_n_u16]", + "arguments": [ + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BGRP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbgrp[_n_u32]", + "arguments": [ + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BGRP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbgrp[_n_u64]", + "arguments": [ + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BGRP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbgrp[_n_u8]", + "arguments": [ + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BGRP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbgrp[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BGRP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbgrp[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BGRP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbgrp[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BGRP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbgrp[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BGRP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbic[_b]_z", + "arguments": [ + "svbool_t pg", + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.B" + }, + "op2": { + "register": "Pop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BIC" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbic[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BIC" + ], + [ + "MOVPRFX", + "BIC" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbic[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "AND" + ], + [ + "BIC" + ], + [ + "BIC" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbic[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "BIC" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbic[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BIC" + ], + [ + "MOVPRFX", + "BIC" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbic[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "AND" + ], + [ + "BIC" + ], + [ + "BIC" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbic[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "BIC" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbic[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BIC" + ], + [ + "MOVPRFX", + "BIC" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbic[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "AND" + ], + [ + "BIC" + ], + [ + "BIC" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbic[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "BIC" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbic[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BIC" + ], + [ + "MOVPRFX", + "BIC" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbic[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "AND" + ], + [ + "BIC" + ], + [ + "BIC" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbic[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "BIC" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbic[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BIC" + ], + [ + "MOVPRFX", + "BIC" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbic[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "AND" + ], + [ + "BIC" + ], + [ + "BIC" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbic[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "BIC" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbic[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BIC" + ], + [ + "MOVPRFX", + "BIC" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbic[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "AND" + ], + [ + "BIC" + ], + [ + "BIC" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbic[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "BIC" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbic[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BIC" + ], + [ + "MOVPRFX", + "BIC" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbic[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "AND" + ], + [ + "BIC" + ], + [ + "BIC" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbic[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "BIC" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbic[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BIC" + ], + [ + "MOVPRFX", + "BIC" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbic[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "AND" + ], + [ + "BIC" + ], + [ + "BIC" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbic[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "BIC" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbic[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BIC" + ], + [ + "MOVPRFX", + "BIC" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbic[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BIC" + ], + [ + "BIC" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbic[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "BIC" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbic[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BIC" + ], + [ + "MOVPRFX", + "BIC" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbic[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BIC" + ], + [ + "BIC" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbic[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "BIC" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbic[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BIC" + ], + [ + "MOVPRFX", + "BIC" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbic[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BIC" + ], + [ + "BIC" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbic[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "BIC" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbic[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BIC" + ], + [ + "MOVPRFX", + "BIC" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbic[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BIC" + ], + [ + "BIC" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbic[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "BIC" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbic[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BIC" + ], + [ + "MOVPRFX", + "BIC" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbic[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BIC" + ], + [ + "BIC" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbic[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "BIC" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbic[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BIC" + ], + [ + "MOVPRFX", + "BIC" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbic[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BIC" + ], + [ + "BIC" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbic[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "BIC" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbic[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BIC" + ], + [ + "MOVPRFX", + "BIC" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbic[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BIC" + ], + [ + "BIC" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbic[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "BIC" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbic[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BIC" + ], + [ + "MOVPRFX", + "BIC" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbic[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BIC" + ], + [ + "BIC" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbic[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "BIC" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbrka[_b]_m", + "arguments": [ + "svbool_t inactive", + "svbool_t pg", + "svbool_t op" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Ptied.B" + }, + "op": { + "register": "Pop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BRKA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbrka[_b]_z", + "arguments": [ + "svbool_t pg", + "svbool_t op" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Pop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BRKA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbrkb[_b]_m", + "arguments": [ + "svbool_t inactive", + "svbool_t pg", + "svbool_t op" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Ptied.B" + }, + "op": { + "register": "Pop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BRKB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbrkb[_b]_z", + "arguments": [ + "svbool_t pg", + "svbool_t op" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Pop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BRKB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbrkn[_b]_z", + "arguments": [ + "svbool_t pg", + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.B" + }, + "op2": { + "register": "Ptied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BRKN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbrkpa[_b]_z", + "arguments": [ + "svbool_t pg", + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.B" + }, + "op2": { + "register": "Pop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BRKPA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svbrkpb[_b]_z", + "arguments": [ + "svbool_t pg", + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.B" + }, + "op2": { + "register": "Pop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BRKPB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbsl1n[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BSL1N" + ], + [ + "MOVPRFX", + "BSL1N" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbsl1n[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BSL1N" + ], + [ + "MOVPRFX", + "BSL1N" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbsl1n[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "int64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BSL1N" + ], + [ + "MOVPRFX", + "BSL1N" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbsl1n[_n_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BSL1N" + ], + [ + "MOVPRFX", + "BSL1N" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbsl1n[_n_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BSL1N" + ], + [ + "MOVPRFX", + "BSL1N" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbsl1n[_n_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BSL1N" + ], + [ + "MOVPRFX", + "BSL1N" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbsl1n[_n_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BSL1N" + ], + [ + "MOVPRFX", + "BSL1N" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbsl1n[_n_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BSL1N" + ], + [ + "MOVPRFX", + "BSL1N" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbsl1n[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BSL1N" + ], + [ + "MOVPRFX", + "BSL1N" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbsl1n[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BSL1N" + ], + [ + "MOVPRFX", + "BSL1N" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbsl1n[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BSL1N" + ], + [ + "MOVPRFX", + "BSL1N" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbsl1n[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BSL1N" + ], + [ + "MOVPRFX", + "BSL1N" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbsl1n[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BSL1N" + ], + [ + "MOVPRFX", + "BSL1N" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbsl1n[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BSL1N" + ], + [ + "MOVPRFX", + "BSL1N" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbsl1n[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BSL1N" + ], + [ + "MOVPRFX", + "BSL1N" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbsl1n[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BSL1N" + ], + [ + "MOVPRFX", + "BSL1N" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbsl2n[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BSL2N" + ], + [ + "MOVPRFX", + "BSL2N" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbsl2n[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BSL2N" + ], + [ + "MOVPRFX", + "BSL2N" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbsl2n[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "int64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BSL2N" + ], + [ + "MOVPRFX", + "BSL2N" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbsl2n[_n_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BSL2N" + ], + [ + "MOVPRFX", + "BSL2N" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbsl2n[_n_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BSL2N" + ], + [ + "MOVPRFX", + "BSL2N" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbsl2n[_n_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BSL2N" + ], + [ + "MOVPRFX", + "BSL2N" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbsl2n[_n_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BSL2N" + ], + [ + "MOVPRFX", + "BSL2N" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbsl2n[_n_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BSL2N" + ], + [ + "MOVPRFX", + "BSL2N" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbsl2n[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BSL2N" + ], + [ + "MOVPRFX", + "BSL2N" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbsl2n[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BSL2N" + ], + [ + "MOVPRFX", + "BSL2N" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbsl2n[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BSL2N" + ], + [ + "MOVPRFX", + "BSL2N" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbsl2n[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BSL2N" + ], + [ + "MOVPRFX", + "BSL2N" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbsl2n[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BSL2N" + ], + [ + "MOVPRFX", + "BSL2N" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbsl2n[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BSL2N" + ], + [ + "MOVPRFX", + "BSL2N" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbsl2n[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BSL2N" + ], + [ + "MOVPRFX", + "BSL2N" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbsl2n[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BSL2N" + ], + [ + "MOVPRFX", + "BSL2N" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbsl[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BSL" + ], + [ + "MOVPRFX", + "BSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbsl[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BSL" + ], + [ + "MOVPRFX", + "BSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbsl[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "int64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BSL" + ], + [ + "MOVPRFX", + "BSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbsl[_n_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BSL" + ], + [ + "MOVPRFX", + "BSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbsl[_n_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BSL" + ], + [ + "MOVPRFX", + "BSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbsl[_n_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BSL" + ], + [ + "MOVPRFX", + "BSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbsl[_n_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BSL" + ], + [ + "MOVPRFX", + "BSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbsl[_n_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BSL" + ], + [ + "MOVPRFX", + "BSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbsl[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BSL" + ], + [ + "MOVPRFX", + "BSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbsl[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BSL" + ], + [ + "MOVPRFX", + "BSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbsl[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BSL" + ], + [ + "MOVPRFX", + "BSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbsl[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BSL" + ], + [ + "MOVPRFX", + "BSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbsl[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BSL" + ], + [ + "MOVPRFX", + "BSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbsl[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BSL" + ], + [ + "MOVPRFX", + "BSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbsl[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BSL" + ], + [ + "MOVPRFX", + "BSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svbsl[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "BSL" + ], + [ + "MOVPRFX", + "BSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcadd[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCADD" + ], + [ + "MOVPRFX", + "FCADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcadd[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCADD" + ], + [ + "MOVPRFX", + "FCADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcadd[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcadd[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCADD" + ], + [ + "MOVPRFX", + "FCADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcadd[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCADD" + ], + [ + "MOVPRFX", + "FCADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcadd[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcadd[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCADD" + ], + [ + "MOVPRFX", + "FCADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcadd[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCADD" + ], + [ + "MOVPRFX", + "FCADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcadd[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcadd[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CADD" + ], + [ + "MOVPRFX", + "CADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcadd[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CADD" + ], + [ + "MOVPRFX", + "CADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcadd[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CADD" + ], + [ + "MOVPRFX", + "CADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcadd[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CADD" + ], + [ + "MOVPRFX", + "CADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcadd[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CADD" + ], + [ + "MOVPRFX", + "CADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcadd[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CADD" + ], + [ + "MOVPRFX", + "CADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcadd[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CADD" + ], + [ + "MOVPRFX", + "CADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcadd[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CADD" + ], + [ + "MOVPRFX", + "CADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcdot[_s32]", + "arguments": [ + "svint32_t op1", + "svint8_t op2", + "svint8_t op3", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CDOT" + ], + [ + "MOVPRFX", + "CDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcdot[_s64]", + "arguments": [ + "svint64_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CDOT" + ], + [ + "MOVPRFX", + "CDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcdot_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint8_t op2", + "svint8_t op3", + "uint64_t imm_index", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CDOT" + ], + [ + "MOVPRFX", + "CDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcdot_lane[_s64]", + "arguments": [ + "svint64_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_index", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CDOT" + ], + [ + "MOVPRFX", + "CDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svclamp[_f16]", + "arguments": [ + "svfloat16_t op", + "svfloat16_t min", + "svfloat16_t max" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "max": { + "register": "Zreg3.H" + }, + "min": { + "register": "Zreg2.H" + }, + "op": { + "register": "Zreg1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCLAMP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svclamp[_f32]", + "arguments": [ + "svfloat32_t op", + "svfloat32_t min", + "svfloat32_t max" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "max": { + "register": "Zreg3.S" + }, + "min": { + "register": "Zreg2.S" + }, + "op": { + "register": "Zreg1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCLAMP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svclamp[_f64]", + "arguments": [ + "svfloat64_t op", + "svfloat64_t min", + "svfloat64_t max" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "max": { + "register": "Zreg3.D" + }, + "min": { + "register": "Zreg2.D" + }, + "op": { + "register": "Zreg1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCLAMP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svclamp[_s16]", + "arguments": [ + "svint16_t op", + "svint16_t min", + "svint16_t max" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "max": { + "register": "Zreg3.H" + }, + "min": { + "register": "Zreg2.H" + }, + "op": { + "register": "Zreg1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCLAMP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svclamp[_s32]", + "arguments": [ + "svint32_t op", + "svint32_t min", + "svint32_t max" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "max": { + "register": "Zreg3.S" + }, + "min": { + "register": "Zreg2.S" + }, + "op": { + "register": "Zreg1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCLAMP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svclamp[_s64]", + "arguments": [ + "svint64_t op", + "svint64_t min", + "svint64_t max" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "max": { + "register": "Zreg3.D" + }, + "min": { + "register": "Zreg2.D" + }, + "op": { + "register": "Zreg1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCLAMP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svclamp[_s8]", + "arguments": [ + "svint8_t op", + "svint8_t min", + "svint8_t max" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "max": { + "register": "Zreg3.B" + }, + "min": { + "register": "Zreg2.B" + }, + "op": { + "register": "Zreg1.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCLAMP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svclamp[_u16]", + "arguments": [ + "svuint16_t op", + "svuint16_t min", + "svuint16_t max" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "max": { + "register": "Zreg3.H" + }, + "min": { + "register": "Zreg2.H" + }, + "op": { + "register": "Zreg1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCLAMP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svclamp[_u32]", + "arguments": [ + "svuint32_t op", + "svuint32_t min", + "svuint32_t max" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "max": { + "register": "Zreg3.S" + }, + "min": { + "register": "Zreg2.S" + }, + "op": { + "register": "Zreg1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCLAMP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svclamp[_u64]", + "arguments": [ + "svuint64_t op", + "svuint64_t min", + "svuint64_t max" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "max": { + "register": "Zreg3.D" + }, + "min": { + "register": "Zreg2.D" + }, + "op": { + "register": "Zreg1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCLAMP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svclamp[_u8]", + "arguments": [ + "svuint8_t op", + "svuint8_t min", + "svuint8_t max" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "max": { + "register": "Zreg3.B" + }, + "min": { + "register": "Zreg2.B" + }, + "op": { + "register": "Zreg1.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCLAMP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclasta[_f16]", + "arguments": [ + "svbool_t pg", + "svfloat16_t fallback", + "svfloat16_t data" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.H" + }, + "fallback": { + "register": "Zfallback.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLASTA" + ], + [ + "MOVPRFX", + "CLASTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclasta[_f32]", + "arguments": [ + "svbool_t pg", + "svfloat32_t fallback", + "svfloat32_t data" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.S" + }, + "fallback": { + "register": "Zfallback.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLASTA" + ], + [ + "MOVPRFX", + "CLASTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclasta[_f64]", + "arguments": [ + "svbool_t pg", + "svfloat64_t fallback", + "svfloat64_t data" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D" + }, + "fallback": { + "register": "Zfallback.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLASTA" + ], + [ + "MOVPRFX", + "CLASTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclasta[_n_f16]", + "arguments": [ + "svbool_t pg", + "float16_t fallback", + "svfloat16_t data" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.H" + }, + "fallback": { + "register": "Htied|Wtied" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLASTA" + ], + [ + "CLASTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclasta[_n_f32]", + "arguments": [ + "svbool_t pg", + "float32_t fallback", + "svfloat32_t data" + ], + "return_type": { + "value": "float32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.S" + }, + "fallback": { + "register": "Stied|Wtied" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLASTA" + ], + [ + "CLASTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclasta[_n_f64]", + "arguments": [ + "svbool_t pg", + "float64_t fallback", + "svfloat64_t data" + ], + "return_type": { + "value": "float64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D" + }, + "fallback": { + "register": "Dtied|Xtied" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLASTA" + ], + [ + "CLASTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclasta[_n_s16]", + "arguments": [ + "svbool_t pg", + "int16_t fallback", + "svint16_t data" + ], + "return_type": { + "value": "int16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.H" + }, + "fallback": { + "register": "Htied|Wtied" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLASTA" + ], + [ + "CLASTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclasta[_n_s32]", + "arguments": [ + "svbool_t pg", + "int32_t fallback", + "svint32_t data" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.S" + }, + "fallback": { + "register": "Stied|Wtied" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLASTA" + ], + [ + "CLASTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclasta[_n_s64]", + "arguments": [ + "svbool_t pg", + "int64_t fallback", + "svint64_t data" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D" + }, + "fallback": { + "register": "Dtied|Xtied" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLASTA" + ], + [ + "CLASTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclasta[_n_s8]", + "arguments": [ + "svbool_t pg", + "int8_t fallback", + "svint8_t data" + ], + "return_type": { + "value": "int8_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.B" + }, + "fallback": { + "register": "Btied|Wtied" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLASTA" + ], + [ + "CLASTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclasta[_n_u16]", + "arguments": [ + "svbool_t pg", + "uint16_t fallback", + "svuint16_t data" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.H" + }, + "fallback": { + "register": "Htied|Wtied" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLASTA" + ], + [ + "CLASTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclasta[_n_u32]", + "arguments": [ + "svbool_t pg", + "uint32_t fallback", + "svuint32_t data" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.S" + }, + "fallback": { + "register": "Stied|Wtied" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLASTA" + ], + [ + "CLASTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclasta[_n_u64]", + "arguments": [ + "svbool_t pg", + "uint64_t fallback", + "svuint64_t data" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D" + }, + "fallback": { + "register": "Dtied|Xtied" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLASTA" + ], + [ + "CLASTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclasta[_n_u8]", + "arguments": [ + "svbool_t pg", + "uint8_t fallback", + "svuint8_t data" + ], + "return_type": { + "value": "uint8_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.B" + }, + "fallback": { + "register": "Btied|Wtied" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLASTA" + ], + [ + "CLASTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclasta[_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t fallback", + "svint16_t data" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.H" + }, + "fallback": { + "register": "Zfallback.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLASTA" + ], + [ + "MOVPRFX", + "CLASTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclasta[_s32]", + "arguments": [ + "svbool_t pg", + "svint32_t fallback", + "svint32_t data" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.S" + }, + "fallback": { + "register": "Zfallback.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLASTA" + ], + [ + "MOVPRFX", + "CLASTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclasta[_s64]", + "arguments": [ + "svbool_t pg", + "svint64_t fallback", + "svint64_t data" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D" + }, + "fallback": { + "register": "Zfallback.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLASTA" + ], + [ + "MOVPRFX", + "CLASTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclasta[_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t fallback", + "svint8_t data" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.B" + }, + "fallback": { + "register": "Zfallback.B|Ztied.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLASTA" + ], + [ + "MOVPRFX", + "CLASTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclasta[_u16]", + "arguments": [ + "svbool_t pg", + "svuint16_t fallback", + "svuint16_t data" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.H" + }, + "fallback": { + "register": "Zfallback.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLASTA" + ], + [ + "MOVPRFX", + "CLASTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclasta[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t fallback", + "svuint32_t data" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.S" + }, + "fallback": { + "register": "Zfallback.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLASTA" + ], + [ + "MOVPRFX", + "CLASTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclasta[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t fallback", + "svuint64_t data" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D" + }, + "fallback": { + "register": "Zfallback.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLASTA" + ], + [ + "MOVPRFX", + "CLASTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclasta[_u8]", + "arguments": [ + "svbool_t pg", + "svuint8_t fallback", + "svuint8_t data" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.B" + }, + "fallback": { + "register": "Zfallback.B|Ztied.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLASTA" + ], + [ + "MOVPRFX", + "CLASTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclastb[_f16]", + "arguments": [ + "svbool_t pg", + "svfloat16_t fallback", + "svfloat16_t data" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.H" + }, + "fallback": { + "register": "Zfallback.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLASTB" + ], + [ + "MOVPRFX", + "CLASTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclastb[_f32]", + "arguments": [ + "svbool_t pg", + "svfloat32_t fallback", + "svfloat32_t data" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.S" + }, + "fallback": { + "register": "Zfallback.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLASTB" + ], + [ + "MOVPRFX", + "CLASTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclastb[_f64]", + "arguments": [ + "svbool_t pg", + "svfloat64_t fallback", + "svfloat64_t data" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D" + }, + "fallback": { + "register": "Zfallback.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLASTB" + ], + [ + "MOVPRFX", + "CLASTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclastb[_n_f16]", + "arguments": [ + "svbool_t pg", + "float16_t fallback", + "svfloat16_t data" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.H" + }, + "fallback": { + "register": "Htied|Wtied" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLASTB" + ], + [ + "CLASTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclastb[_n_f32]", + "arguments": [ + "svbool_t pg", + "float32_t fallback", + "svfloat32_t data" + ], + "return_type": { + "value": "float32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.S" + }, + "fallback": { + "register": "Stied|Wtied" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLASTB" + ], + [ + "CLASTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclastb[_n_f64]", + "arguments": [ + "svbool_t pg", + "float64_t fallback", + "svfloat64_t data" + ], + "return_type": { + "value": "float64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D" + }, + "fallback": { + "register": "Dtied|Xtied" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLASTB" + ], + [ + "CLASTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclastb[_n_s16]", + "arguments": [ + "svbool_t pg", + "int16_t fallback", + "svint16_t data" + ], + "return_type": { + "value": "int16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.H" + }, + "fallback": { + "register": "Htied|Wtied" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLASTB" + ], + [ + "CLASTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclastb[_n_s32]", + "arguments": [ + "svbool_t pg", + "int32_t fallback", + "svint32_t data" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.S" + }, + "fallback": { + "register": "Stied|Wtied" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLASTB" + ], + [ + "CLASTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclastb[_n_s64]", + "arguments": [ + "svbool_t pg", + "int64_t fallback", + "svint64_t data" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D" + }, + "fallback": { + "register": "Dtied|Xtied" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLASTB" + ], + [ + "CLASTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclastb[_n_s8]", + "arguments": [ + "svbool_t pg", + "int8_t fallback", + "svint8_t data" + ], + "return_type": { + "value": "int8_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.B" + }, + "fallback": { + "register": "Btied|Wtied" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLASTB" + ], + [ + "CLASTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclastb[_n_u16]", + "arguments": [ + "svbool_t pg", + "uint16_t fallback", + "svuint16_t data" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.H" + }, + "fallback": { + "register": "Htied|Wtied" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLASTB" + ], + [ + "CLASTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclastb[_n_u32]", + "arguments": [ + "svbool_t pg", + "uint32_t fallback", + "svuint32_t data" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.S" + }, + "fallback": { + "register": "Stied|Wtied" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLASTB" + ], + [ + "CLASTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclastb[_n_u64]", + "arguments": [ + "svbool_t pg", + "uint64_t fallback", + "svuint64_t data" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D" + }, + "fallback": { + "register": "Dtied|Xtied" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLASTB" + ], + [ + "CLASTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclastb[_n_u8]", + "arguments": [ + "svbool_t pg", + "uint8_t fallback", + "svuint8_t data" + ], + "return_type": { + "value": "uint8_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.B" + }, + "fallback": { + "register": "Btied|Wtied" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLASTB" + ], + [ + "CLASTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclastb[_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t fallback", + "svint16_t data" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.H" + }, + "fallback": { + "register": "Zfallback.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLASTB" + ], + [ + "MOVPRFX", + "CLASTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclastb[_s32]", + "arguments": [ + "svbool_t pg", + "svint32_t fallback", + "svint32_t data" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.S" + }, + "fallback": { + "register": "Zfallback.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLASTB" + ], + [ + "MOVPRFX", + "CLASTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclastb[_s64]", + "arguments": [ + "svbool_t pg", + "svint64_t fallback", + "svint64_t data" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D" + }, + "fallback": { + "register": "Zfallback.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLASTB" + ], + [ + "MOVPRFX", + "CLASTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclastb[_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t fallback", + "svint8_t data" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.B" + }, + "fallback": { + "register": "Zfallback.B|Ztied.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLASTB" + ], + [ + "MOVPRFX", + "CLASTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclastb[_u16]", + "arguments": [ + "svbool_t pg", + "svuint16_t fallback", + "svuint16_t data" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.H" + }, + "fallback": { + "register": "Zfallback.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLASTB" + ], + [ + "MOVPRFX", + "CLASTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclastb[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t fallback", + "svuint32_t data" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.S" + }, + "fallback": { + "register": "Zfallback.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLASTB" + ], + [ + "MOVPRFX", + "CLASTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclastb[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t fallback", + "svuint64_t data" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D" + }, + "fallback": { + "register": "Zfallback.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLASTB" + ], + [ + "MOVPRFX", + "CLASTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclastb[_u8]", + "arguments": [ + "svbool_t pg", + "svuint8_t fallback", + "svuint8_t data" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.B" + }, + "fallback": { + "register": "Zfallback.B|Ztied.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLASTB" + ], + [ + "MOVPRFX", + "CLASTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcls[_s16]_m", + "arguments": [ + "svuint16_t inactive", + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLS" + ], + [ + "MOVPRFX", + "CLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcls[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLS" + ], + [ + "MOVPRFX", + "CLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcls[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "CLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcls[_s32]_m", + "arguments": [ + "svuint32_t inactive", + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLS" + ], + [ + "MOVPRFX", + "CLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcls[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLS" + ], + [ + "MOVPRFX", + "CLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcls[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "CLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcls[_s64]_m", + "arguments": [ + "svuint64_t inactive", + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLS" + ], + [ + "MOVPRFX", + "CLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcls[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLS" + ], + [ + "MOVPRFX", + "CLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcls[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "CLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcls[_s8]_m", + "arguments": [ + "svuint8_t inactive", + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.B|Ztied.B" + }, + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLS" + ], + [ + "MOVPRFX", + "CLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcls[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B|Ztied.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLS" + ], + [ + "MOVPRFX", + "CLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcls[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "CLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclz[_s16]_m", + "arguments": [ + "svuint16_t inactive", + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLZ" + ], + [ + "MOVPRFX", + "CLZ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclz[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLZ" + ], + [ + "MOVPRFX", + "CLZ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclz[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "CLZ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclz[_s32]_m", + "arguments": [ + "svuint32_t inactive", + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLZ" + ], + [ + "MOVPRFX", + "CLZ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclz[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLZ" + ], + [ + "MOVPRFX", + "CLZ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclz[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "CLZ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclz[_s64]_m", + "arguments": [ + "svuint64_t inactive", + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLZ" + ], + [ + "MOVPRFX", + "CLZ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclz[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLZ" + ], + [ + "MOVPRFX", + "CLZ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclz[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "CLZ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclz[_s8]_m", + "arguments": [ + "svuint8_t inactive", + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.B|Ztied.B" + }, + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLZ" + ], + [ + "MOVPRFX", + "CLZ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclz[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B|Ztied.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLZ" + ], + [ + "MOVPRFX", + "CLZ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclz[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "CLZ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclz[_u16]_m", + "arguments": [ + "svuint16_t inactive", + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLZ" + ], + [ + "MOVPRFX", + "CLZ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclz[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLZ" + ], + [ + "MOVPRFX", + "CLZ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclz[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "CLZ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclz[_u32]_m", + "arguments": [ + "svuint32_t inactive", + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLZ" + ], + [ + "MOVPRFX", + "CLZ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclz[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLZ" + ], + [ + "MOVPRFX", + "CLZ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclz[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "CLZ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclz[_u64]_m", + "arguments": [ + "svuint64_t inactive", + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLZ" + ], + [ + "MOVPRFX", + "CLZ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclz[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLZ" + ], + [ + "MOVPRFX", + "CLZ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclz[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "CLZ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclz[_u8]_m", + "arguments": [ + "svuint8_t inactive", + "svbool_t pg", + "svuint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.B|Ztied.B" + }, + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLZ" + ], + [ + "MOVPRFX", + "CLZ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclz[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B|Ztied.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CLZ" + ], + [ + "MOVPRFX", + "CLZ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svclz[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "CLZ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmla[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMLA" + ], + [ + "MOVPRFX", + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmla[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMLA" + ], + [ + "MOVPRFX", + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmla[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmla[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMLA" + ], + [ + "MOVPRFX", + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmla[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMLA" + ], + [ + "MOVPRFX", + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmla[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmla[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMLA" + ], + [ + "MOVPRFX", + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmla[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMLA" + ], + [ + "MOVPRFX", + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmla[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcmla[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMLA" + ], + [ + "MOVPRFX", + "CMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcmla[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "svint32_t op3", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMLA" + ], + [ + "MOVPRFX", + "CMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcmla[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "svint64_t op3", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMLA" + ], + [ + "MOVPRFX", + "CMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcmla[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "svint8_t op3", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMLA" + ], + [ + "MOVPRFX", + "CMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcmla[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMLA" + ], + [ + "MOVPRFX", + "CMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcmla[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMLA" + ], + [ + "MOVPRFX", + "CMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcmla[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMLA" + ], + [ + "MOVPRFX", + "CMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcmla[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMLA" + ], + [ + "MOVPRFX", + "CMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmla_lane[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3", + "uint64_t imm_index", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMLA" + ], + [ + "MOVPRFX", + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmla_lane[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3", + "uint64_t imm_index", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMLA" + ], + [ + "MOVPRFX", + "FCMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcmla_lane[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_index", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMLA" + ], + [ + "MOVPRFX", + "CMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcmla_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "svint32_t op3", + "uint64_t imm_index", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMLA" + ], + [ + "MOVPRFX", + "CMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcmla_lane[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3", + "uint64_t imm_index", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMLA" + ], + [ + "MOVPRFX", + "CMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcmla_lane[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3", + "uint64_t imm_index", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMLA" + ], + [ + "MOVPRFX", + "CMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpeq[_f16]", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMEQ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpeq[_f32]", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMEQ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpeq[_f64]", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMEQ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpeq[_n_f16]", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMEQ" + ], + [ + "FCMEQ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpeq[_n_f32]", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMEQ" + ], + [ + "FCMEQ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpeq[_n_f64]", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMEQ" + ], + [ + "FCMEQ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpeq[_n_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPEQ" + ], + [ + "CMPEQ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpeq[_n_s32]", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPEQ" + ], + [ + "CMPEQ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpeq[_n_s64]", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPEQ" + ], + [ + "CMPEQ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpeq[_n_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPEQ" + ], + [ + "CMPEQ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpeq[_n_u16]", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPEQ" + ], + [ + "CMPEQ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpeq[_n_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPEQ" + ], + [ + "CMPEQ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpeq[_n_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPEQ" + ], + [ + "CMPEQ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpeq[_n_u8]", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPEQ" + ], + [ + "CMPEQ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpeq[_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPEQ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpeq[_s32]", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPEQ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpeq[_s64]", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPEQ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpeq[_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPEQ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpeq[_u16]", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPEQ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpeq[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPEQ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpeq[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPEQ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpeq[_u8]", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPEQ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpeq_wide[_n_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPEQ" + ], + [ + "CMPEQ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpeq_wide[_n_s32]", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPEQ" + ], + [ + "CMPEQ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpeq_wide[_n_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPEQ" + ], + [ + "CMPEQ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpeq_wide[_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPEQ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpeq_wide[_s32]", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPEQ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpeq_wide[_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPEQ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpge[_f16]", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpge[_f32]", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpge[_f64]", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpge[_n_f16]", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGE" + ], + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpge[_n_f32]", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGE" + ], + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpge[_n_f64]", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGE" + ], + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpge[_n_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPGE" + ], + [ + "CMPGE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpge[_n_s32]", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPGE" + ], + [ + "CMPGE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpge[_n_s64]", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPGE" + ], + [ + "CMPGE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpge[_n_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPGE" + ], + [ + "CMPGE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpge[_n_u16]", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPHS" + ], + [ + "CMPHS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpge[_n_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPHS" + ], + [ + "CMPHS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpge[_n_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPHS" + ], + [ + "CMPHS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpge[_n_u8]", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPHS" + ], + [ + "CMPHS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpge[_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPGE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpge[_s32]", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPGE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpge[_s64]", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPGE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpge[_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPGE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpge[_u16]", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPHS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpge[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPHS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpge[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPHS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpge[_u8]", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPHS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpge_wide[_n_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPGE" + ], + [ + "CMPGE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpge_wide[_n_s32]", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPGE" + ], + [ + "CMPGE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpge_wide[_n_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPGE" + ], + [ + "CMPGE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpge_wide[_n_u16]", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPHS" + ], + [ + "CMPHS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpge_wide[_n_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPHS" + ], + [ + "CMPHS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpge_wide[_n_u8]", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPHS" + ], + [ + "CMPHS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpge_wide[_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPGE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpge_wide[_s32]", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPGE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpge_wide[_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPGE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpge_wide[_u16]", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPHS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpge_wide[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPHS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpge_wide[_u8]", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPHS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpgt[_f16]", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpgt[_f32]", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpgt[_f64]", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpgt[_n_f16]", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGT" + ], + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpgt[_n_f32]", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGT" + ], + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpgt[_n_f64]", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGT" + ], + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpgt[_n_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPGT" + ], + [ + "CMPGT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpgt[_n_s32]", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPGT" + ], + [ + "CMPGT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpgt[_n_s64]", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPGT" + ], + [ + "CMPGT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpgt[_n_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPGT" + ], + [ + "CMPGT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpgt[_n_u16]", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPHI" + ], + [ + "CMPHI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpgt[_n_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPHI" + ], + [ + "CMPHI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpgt[_n_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPHI" + ], + [ + "CMPHI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpgt[_n_u8]", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPHI" + ], + [ + "CMPHI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpgt[_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPGT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpgt[_s32]", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPGT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpgt[_s64]", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPGT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpgt[_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPGT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpgt[_u16]", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPHI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpgt[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPHI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpgt[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPHI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpgt[_u8]", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPHI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpgt_wide[_n_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPGT" + ], + [ + "CMPGT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpgt_wide[_n_s32]", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPGT" + ], + [ + "CMPGT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpgt_wide[_n_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPGT" + ], + [ + "CMPGT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpgt_wide[_n_u16]", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPHI" + ], + [ + "CMPHI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpgt_wide[_n_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPHI" + ], + [ + "CMPHI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpgt_wide[_n_u8]", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPHI" + ], + [ + "CMPHI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpgt_wide[_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPGT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpgt_wide[_s32]", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPGT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpgt_wide[_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPGT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpgt_wide[_u16]", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPHI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpgt_wide[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPHI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpgt_wide[_u8]", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPHI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmple[_f16]", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmple[_f32]", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmple[_f64]", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmple[_n_f16]", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMLE" + ], + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmple[_n_f32]", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMLE" + ], + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmple[_n_f64]", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMLE" + ], + [ + "FCMGE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmple[_n_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPLE" + ], + [ + "CMPGE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmple[_n_s32]", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPLE" + ], + [ + "CMPGE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmple[_n_s64]", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPLE" + ], + [ + "CMPGE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmple[_n_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPLE" + ], + [ + "CMPGE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmple[_n_u16]", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPLS" + ], + [ + "CMPHS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmple[_n_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPLS" + ], + [ + "CMPHS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmple[_n_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPLS" + ], + [ + "CMPHS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmple[_n_u8]", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPLS" + ], + [ + "CMPHS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmple[_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPGE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmple[_s32]", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPGE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmple[_s64]", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPGE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmple[_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPGE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmple[_u16]", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPHS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmple[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPHS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmple[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPHS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmple[_u8]", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPHS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmple_wide[_n_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPLE" + ], + [ + "CMPLE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmple_wide[_n_s32]", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPLE" + ], + [ + "CMPLE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmple_wide[_n_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPLE" + ], + [ + "CMPLE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmple_wide[_n_u16]", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPLS" + ], + [ + "CMPLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmple_wide[_n_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPLS" + ], + [ + "CMPLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmple_wide[_n_u8]", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPLS" + ], + [ + "CMPLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmple_wide[_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPLE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmple_wide[_s32]", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPLE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmple_wide[_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPLE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmple_wide[_u16]", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmple_wide[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmple_wide[_u8]", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmplt[_f16]", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmplt[_f32]", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmplt[_f64]", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmplt[_n_f16]", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMLT" + ], + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmplt[_n_f32]", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMLT" + ], + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmplt[_n_f64]", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMLT" + ], + [ + "FCMGT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmplt[_n_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPLT" + ], + [ + "CMPGT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmplt[_n_s32]", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPLT" + ], + [ + "CMPGT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmplt[_n_s64]", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPLT" + ], + [ + "CMPGT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmplt[_n_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPLT" + ], + [ + "CMPGT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmplt[_n_u16]", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPLO" + ], + [ + "CMPHI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmplt[_n_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPLO" + ], + [ + "CMPHI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmplt[_n_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPLO" + ], + [ + "CMPHI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmplt[_n_u8]", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPLO" + ], + [ + "CMPHI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmplt[_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPGT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmplt[_s32]", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPGT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmplt[_s64]", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPGT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmplt[_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPGT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmplt[_u16]", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPHI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmplt[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPHI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmplt[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPHI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmplt[_u8]", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPHI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmplt_wide[_n_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPLT" + ], + [ + "CMPLT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmplt_wide[_n_s32]", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPLT" + ], + [ + "CMPLT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmplt_wide[_n_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPLT" + ], + [ + "CMPLT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmplt_wide[_n_u16]", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPLO" + ], + [ + "CMPLO" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmplt_wide[_n_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPLO" + ], + [ + "CMPLO" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmplt_wide[_n_u8]", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPLO" + ], + [ + "CMPLO" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmplt_wide[_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPLT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmplt_wide[_s32]", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPLT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmplt_wide[_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPLT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmplt_wide[_u16]", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPLO" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmplt_wide[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPLO" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmplt_wide[_u8]", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPLO" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpne[_f16]", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMNE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpne[_f32]", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMNE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpne[_f64]", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMNE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpne[_n_f16]", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMNE" + ], + [ + "FCMNE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpne[_n_f32]", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMNE" + ], + [ + "FCMNE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpne[_n_f64]", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMNE" + ], + [ + "FCMNE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpne[_n_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPNE" + ], + [ + "CMPNE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpne[_n_s32]", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPNE" + ], + [ + "CMPNE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpne[_n_s64]", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPNE" + ], + [ + "CMPNE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpne[_n_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPNE" + ], + [ + "CMPNE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpne[_n_u16]", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPNE" + ], + [ + "CMPNE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpne[_n_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPNE" + ], + [ + "CMPNE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpne[_n_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPNE" + ], + [ + "CMPNE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpne[_n_u8]", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPNE" + ], + [ + "CMPNE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpne[_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPNE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpne[_s32]", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPNE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpne[_s64]", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPNE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpne[_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPNE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpne[_u16]", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPNE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpne[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPNE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpne[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPNE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpne[_u8]", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPNE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpne_wide[_n_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPNE" + ], + [ + "CMPNE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpne_wide[_n_s32]", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPNE" + ], + [ + "CMPNE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpne_wide[_n_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPNE" + ], + [ + "CMPNE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpne_wide[_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPNE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpne_wide[_s32]", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPNE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpne_wide[_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CMPNE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpuo[_f16]", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMUO" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpuo[_f32]", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMUO" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpuo[_f64]", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMUO" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpuo[_n_f16]", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMUO" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpuo[_n_f32]", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMUO" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcmpuo[_n_f64]", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCMUO" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnot[_s16]_m", + "arguments": [ + "svint16_t inactive", + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNOT" + ], + [ + "MOVPRFX", + "CNOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnot[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNOT" + ], + [ + "MOVPRFX", + "CNOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnot[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "CNOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnot[_s32]_m", + "arguments": [ + "svint32_t inactive", + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNOT" + ], + [ + "MOVPRFX", + "CNOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnot[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNOT" + ], + [ + "MOVPRFX", + "CNOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnot[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "CNOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnot[_s64]_m", + "arguments": [ + "svint64_t inactive", + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNOT" + ], + [ + "MOVPRFX", + "CNOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnot[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNOT" + ], + [ + "MOVPRFX", + "CNOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnot[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "CNOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnot[_s8]_m", + "arguments": [ + "svint8_t inactive", + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.B|Ztied.B" + }, + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNOT" + ], + [ + "MOVPRFX", + "CNOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnot[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B|Ztied.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNOT" + ], + [ + "MOVPRFX", + "CNOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnot[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "CNOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnot[_u16]_m", + "arguments": [ + "svuint16_t inactive", + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNOT" + ], + [ + "MOVPRFX", + "CNOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnot[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNOT" + ], + [ + "MOVPRFX", + "CNOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnot[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "CNOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnot[_u32]_m", + "arguments": [ + "svuint32_t inactive", + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNOT" + ], + [ + "MOVPRFX", + "CNOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnot[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNOT" + ], + [ + "MOVPRFX", + "CNOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnot[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "CNOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnot[_u64]_m", + "arguments": [ + "svuint64_t inactive", + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNOT" + ], + [ + "MOVPRFX", + "CNOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnot[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNOT" + ], + [ + "MOVPRFX", + "CNOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnot[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "CNOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnot[_u8]_m", + "arguments": [ + "svuint8_t inactive", + "svbool_t pg", + "svuint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.B|Ztied.B" + }, + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNOT" + ], + [ + "MOVPRFX", + "CNOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnot[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B|Ztied.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNOT" + ], + [ + "MOVPRFX", + "CNOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnot[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "CNOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnt[_f16]_m", + "arguments": [ + "svuint16_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNT" + ], + [ + "MOVPRFX", + "CNT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnt[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNT" + ], + [ + "MOVPRFX", + "CNT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnt[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "CNT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnt[_f32]_m", + "arguments": [ + "svuint32_t inactive", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNT" + ], + [ + "MOVPRFX", + "CNT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnt[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNT" + ], + [ + "MOVPRFX", + "CNT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnt[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "CNT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnt[_f64]_m", + "arguments": [ + "svuint64_t inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNT" + ], + [ + "MOVPRFX", + "CNT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnt[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNT" + ], + [ + "MOVPRFX", + "CNT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnt[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "CNT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnt[_s16]_m", + "arguments": [ + "svuint16_t inactive", + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNT" + ], + [ + "MOVPRFX", + "CNT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnt[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNT" + ], + [ + "MOVPRFX", + "CNT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnt[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "CNT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnt[_s32]_m", + "arguments": [ + "svuint32_t inactive", + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNT" + ], + [ + "MOVPRFX", + "CNT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnt[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNT" + ], + [ + "MOVPRFX", + "CNT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnt[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "CNT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnt[_s64]_m", + "arguments": [ + "svuint64_t inactive", + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNT" + ], + [ + "MOVPRFX", + "CNT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnt[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNT" + ], + [ + "MOVPRFX", + "CNT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnt[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "CNT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnt[_s8]_m", + "arguments": [ + "svuint8_t inactive", + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.B|Ztied.B" + }, + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNT" + ], + [ + "MOVPRFX", + "CNT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnt[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B|Ztied.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNT" + ], + [ + "MOVPRFX", + "CNT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnt[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "CNT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnt[_u16]_m", + "arguments": [ + "svuint16_t inactive", + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNT" + ], + [ + "MOVPRFX", + "CNT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnt[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNT" + ], + [ + "MOVPRFX", + "CNT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnt[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "CNT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnt[_u32]_m", + "arguments": [ + "svuint32_t inactive", + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNT" + ], + [ + "MOVPRFX", + "CNT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnt[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNT" + ], + [ + "MOVPRFX", + "CNT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnt[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "CNT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnt[_u64]_m", + "arguments": [ + "svuint64_t inactive", + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNT" + ], + [ + "MOVPRFX", + "CNT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnt[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNT" + ], + [ + "MOVPRFX", + "CNT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnt[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "CNT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnt[_u8]_m", + "arguments": [ + "svuint8_t inactive", + "svbool_t pg", + "svuint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.B|Ztied.B" + }, + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNT" + ], + [ + "MOVPRFX", + "CNT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnt[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B|Ztied.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNT" + ], + [ + "MOVPRFX", + "CNT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnt[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "CNT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcntb", + "arguments": [], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcntb_pat", + "arguments": [ + "enum svpattern pattern" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcntd", + "arguments": [], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcntd_pat", + "arguments": [ + "enum svpattern pattern" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnth", + "arguments": [], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcnth_pat", + "arguments": [ + "enum svpattern pattern" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcntp_b16", + "arguments": [ + "svbool_t pg", + "svbool_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Pop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcntp_b32", + "arguments": [ + "svbool_t pg", + "svbool_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Pop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcntp_b64", + "arguments": [ + "svbool_t pg", + "svbool_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Pop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcntp_b8", + "arguments": [ + "svbool_t pg", + "svbool_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Pop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcntp_c16", + "arguments": [ + "svcount_t pnn", + "uint64_t vl" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "pnn": { + "register": "PNreg1.H" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcntp_c32", + "arguments": [ + "svcount_t pnn", + "uint64_t vl" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "pnn": { + "register": "PNreg1.S" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcntp_c64", + "arguments": [ + "svcount_t pnn", + "uint64_t vl" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "pnn": { + "register": "PNreg1.D" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcntp_c8", + "arguments": [ + "svcount_t pnn", + "uint64_t vl" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "pnn": { + "register": "PNreg1.B" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcntw", + "arguments": [], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcntw_pat", + "arguments": [ + "enum svpattern pattern" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcompact[_f32]", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "COMPACT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcompact[_f64]", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "COMPACT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcompact[_s32]", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "COMPACT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcompact[_s64]", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "COMPACT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcompact[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "COMPACT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcompact[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "COMPACT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcreate2[_b]", + "arguments": [ + "svbool_t x", + "svbool_t y" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate2[_f16]", + "arguments": [ + "svfloat16_t x0", + "svfloat16_t x1" + ], + "return_type": { + "value": "svfloat16x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate2[_f32]", + "arguments": [ + "svfloat32_t x0", + "svfloat32_t x1" + ], + "return_type": { + "value": "svfloat32x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate2[_f64]", + "arguments": [ + "svfloat64_t x0", + "svfloat64_t x1" + ], + "return_type": { + "value": "svfloat64x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate2[_s16]", + "arguments": [ + "svint16_t x0", + "svint16_t x1" + ], + "return_type": { + "value": "svint16x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate2[_s32]", + "arguments": [ + "svint32_t x0", + "svint32_t x1" + ], + "return_type": { + "value": "svint32x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate2[_s64]", + "arguments": [ + "svint64_t x0", + "svint64_t x1" + ], + "return_type": { + "value": "svint64x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate2[_s8]", + "arguments": [ + "svint8_t x0", + "svint8_t x1" + ], + "return_type": { + "value": "svint8x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate2[_u16]", + "arguments": [ + "svuint16_t x0", + "svuint16_t x1" + ], + "return_type": { + "value": "svuint16x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate2[_u32]", + "arguments": [ + "svuint32_t x0", + "svuint32_t x1" + ], + "return_type": { + "value": "svuint32x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate2[_u64]", + "arguments": [ + "svuint64_t x0", + "svuint64_t x1" + ], + "return_type": { + "value": "svuint64x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate2[_u8]", + "arguments": [ + "svuint8_t x0", + "svuint8_t x1" + ], + "return_type": { + "value": "svuint8x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate3[_f16]", + "arguments": [ + "svfloat16_t x0", + "svfloat16_t x1", + "svfloat16_t x2" + ], + "return_type": { + "value": "svfloat16x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate3[_f32]", + "arguments": [ + "svfloat32_t x0", + "svfloat32_t x1", + "svfloat32_t x2" + ], + "return_type": { + "value": "svfloat32x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate3[_f64]", + "arguments": [ + "svfloat64_t x0", + "svfloat64_t x1", + "svfloat64_t x2" + ], + "return_type": { + "value": "svfloat64x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate3[_s16]", + "arguments": [ + "svint16_t x0", + "svint16_t x1", + "svint16_t x2" + ], + "return_type": { + "value": "svint16x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate3[_s32]", + "arguments": [ + "svint32_t x0", + "svint32_t x1", + "svint32_t x2" + ], + "return_type": { + "value": "svint32x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate3[_s64]", + "arguments": [ + "svint64_t x0", + "svint64_t x1", + "svint64_t x2" + ], + "return_type": { + "value": "svint64x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate3[_s8]", + "arguments": [ + "svint8_t x0", + "svint8_t x1", + "svint8_t x2" + ], + "return_type": { + "value": "svint8x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate3[_u16]", + "arguments": [ + "svuint16_t x0", + "svuint16_t x1", + "svuint16_t x2" + ], + "return_type": { + "value": "svuint16x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate3[_u32]", + "arguments": [ + "svuint32_t x0", + "svuint32_t x1", + "svuint32_t x2" + ], + "return_type": { + "value": "svuint32x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate3[_u64]", + "arguments": [ + "svuint64_t x0", + "svuint64_t x1", + "svuint64_t x2" + ], + "return_type": { + "value": "svuint64x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate3[_u8]", + "arguments": [ + "svuint8_t x0", + "svuint8_t x1", + "svuint8_t x2" + ], + "return_type": { + "value": "svuint8x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcreate4[_b]", + "arguments": [ + "svbool_t x", + "svbool_t y", + "svbool_t z", + "svbool_t w" + ], + "return_type": { + "value": "svboolx4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate4[_f16]", + "arguments": [ + "svfloat16_t x0", + "svfloat16_t x1", + "svfloat16_t x2", + "svfloat16_t x3" + ], + "return_type": { + "value": "svfloat16x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate4[_f32]", + "arguments": [ + "svfloat32_t x0", + "svfloat32_t x1", + "svfloat32_t x2", + "svfloat32_t x3" + ], + "return_type": { + "value": "svfloat32x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate4[_f64]", + "arguments": [ + "svfloat64_t x0", + "svfloat64_t x1", + "svfloat64_t x2", + "svfloat64_t x3" + ], + "return_type": { + "value": "svfloat64x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate4[_s16]", + "arguments": [ + "svint16_t x0", + "svint16_t x1", + "svint16_t x2", + "svint16_t x3" + ], + "return_type": { + "value": "svint16x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate4[_s32]", + "arguments": [ + "svint32_t x0", + "svint32_t x1", + "svint32_t x2", + "svint32_t x3" + ], + "return_type": { + "value": "svint32x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate4[_s64]", + "arguments": [ + "svint64_t x0", + "svint64_t x1", + "svint64_t x2", + "svint64_t x3" + ], + "return_type": { + "value": "svint64x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate4[_s8]", + "arguments": [ + "svint8_t x0", + "svint8_t x1", + "svint8_t x2", + "svint8_t x3" + ], + "return_type": { + "value": "svint8x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate4[_u16]", + "arguments": [ + "svuint16_t x0", + "svuint16_t x1", + "svuint16_t x2", + "svuint16_t x3" + ], + "return_type": { + "value": "svuint16x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate4[_u32]", + "arguments": [ + "svuint32_t x0", + "svuint32_t x1", + "svuint32_t x2", + "svuint32_t x3" + ], + "return_type": { + "value": "svuint32x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate4[_u64]", + "arguments": [ + "svuint64_t x0", + "svuint64_t x1", + "svuint64_t x2", + "svuint64_t x3" + ], + "return_type": { + "value": "svuint64x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcreate4[_u8]", + "arguments": [ + "svuint8_t x0", + "svuint8_t x1", + "svuint8_t x2", + "svuint8_t x3" + ], + "return_type": { + "value": "svuint8x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_f32]_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVT" + ], + [ + "MOVPRFX", + "FCVT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVT" + ], + [ + "MOVPRFX", + "FCVT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_f64]_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVT" + ], + [ + "MOVPRFX", + "FCVT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVT" + ], + [ + "MOVPRFX", + "FCVT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_s16]_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ], + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ], + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_s32]_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ], + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ], + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_s64]_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ], + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ], + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_u16]_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ], + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ], + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_u32]_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ], + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ], + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_u64]_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ], + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ], + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f16[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f32[_f16]_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVT" + ], + [ + "MOVPRFX", + "FCVT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f32[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVT" + ], + [ + "MOVPRFX", + "FCVT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f32[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f32[_f64]_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVT" + ], + [ + "MOVPRFX", + "FCVT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f32[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVT" + ], + [ + "MOVPRFX", + "FCVT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f32[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f32[_s32]_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ], + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f32[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ], + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f32[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f32[_s64]_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ], + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f32[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ], + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f32[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f32[_u32]_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ], + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f32[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ], + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f32[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f32[_u64]_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ], + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f32[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ], + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f32[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f64[_f16]_m", + "arguments": [ + "svfloat64_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVT" + ], + [ + "MOVPRFX", + "FCVT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f64[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVT" + ], + [ + "MOVPRFX", + "FCVT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f64[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f64[_f32]_m", + "arguments": [ + "svfloat64_t inactive", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVT" + ], + [ + "MOVPRFX", + "FCVT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f64[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVT" + ], + [ + "MOVPRFX", + "FCVT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f64[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f64[_s32]_m", + "arguments": [ + "svfloat64_t inactive", + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ], + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f64[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ], + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f64[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f64[_s64]_m", + "arguments": [ + "svfloat64_t inactive", + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ], + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f64[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SCVTF" + ], + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f64[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f64[_u32]_m", + "arguments": [ + "svfloat64_t inactive", + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ], + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f64[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ], + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f64[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f64[_u64]_m", + "arguments": [ + "svfloat64_t inactive", + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ], + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f64[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UCVTF" + ], + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_f64[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UCVTF" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s16[_f16]_m", + "arguments": [ + "svint16_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ], + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s16[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ], + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s16[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s32[_f16]_m", + "arguments": [ + "svint32_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ], + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s32[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ], + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s32[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s32[_f32]_m", + "arguments": [ + "svint32_t inactive", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ], + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s32[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ], + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s32[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s32[_f64]_m", + "arguments": [ + "svint32_t inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ], + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s32[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ], + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s32[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s64[_f16]_m", + "arguments": [ + "svint64_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ], + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s64[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ], + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s64[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s64[_f32]_m", + "arguments": [ + "svint64_t inactive", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ], + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s64[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ], + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s64[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s64[_f64]_m", + "arguments": [ + "svint64_t inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ], + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s64[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ], + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_s64[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVTZS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u16[_f16]_m", + "arguments": [ + "svuint16_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ], + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u16[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ], + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u16[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u32[_f16]_m", + "arguments": [ + "svuint32_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ], + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u32[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ], + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u32[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u32[_f32]_m", + "arguments": [ + "svuint32_t inactive", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ], + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u32[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ], + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u32[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u32[_f64]_m", + "arguments": [ + "svuint32_t inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ], + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u32[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ], + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u32[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u64[_f16]_m", + "arguments": [ + "svuint64_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ], + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u64[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ], + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u64[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u64[_f32]_m", + "arguments": [ + "svuint64_t inactive", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ], + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u64[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ], + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u64[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u64[_f64]_m", + "arguments": [ + "svuint64_t inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ], + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u64[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ], + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svcvt_u64[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVTZU" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcvtlt_f32[_f16]_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Ztied.S" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcvtlt_f32[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Ztied.H" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcvtlt_f64[_f32]_m", + "arguments": [ + "svfloat64_t inactive", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Ztied.D" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcvtlt_f64[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Ztied.S" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcvtnt_f16[_f32]_m", + "arguments": [ + "svfloat16_t even", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcvtnt_f16[_f32]_x", + "arguments": [ + "svfloat16_t even", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcvtnt_f32[_f64]_m", + "arguments": [ + "svfloat32_t even", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcvtnt_f32[_f64]_x", + "arguments": [ + "svfloat32_t even", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcvtx_f32[_f64]_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTX" + ], + [ + "MOVPRFX", + "FCVTX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcvtx_f32[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTX" + ], + [ + "MOVPRFX", + "FCVTX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcvtx_f32[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FCVTX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcvtxnt_f32[_f64]_m", + "arguments": [ + "svfloat32_t even", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTXNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svcvtxnt_f32[_f64]_x", + "arguments": [ + "svfloat32_t even", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTXNT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIV" + ], + [ + "MOVPRFX", + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIV" + ], + [ + "FDIVR" + ], + [ + "MOVPRFX", + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FDIV" + ], + [ + "MOVPRFX", + "FDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIV" + ], + [ + "MOVPRFX", + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIV" + ], + [ + "FDIVR" + ], + [ + "MOVPRFX", + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FDIV" + ], + [ + "MOVPRFX", + "FDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIV" + ], + [ + "MOVPRFX", + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIV" + ], + [ + "FDIVR" + ], + [ + "MOVPRFX", + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FDIV" + ], + [ + "MOVPRFX", + "FDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIV" + ], + [ + "MOVPRFX", + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIV" + ], + [ + "FDIVR" + ], + [ + "MOVPRFX", + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FDIV" + ], + [ + "MOVPRFX", + "FDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIV" + ], + [ + "MOVPRFX", + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIV" + ], + [ + "FDIVR" + ], + [ + "MOVPRFX", + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FDIV" + ], + [ + "MOVPRFX", + "FDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIV" + ], + [ + "MOVPRFX", + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIV" + ], + [ + "FDIVR" + ], + [ + "MOVPRFX", + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FDIV" + ], + [ + "MOVPRFX", + "FDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDIV" + ], + [ + "MOVPRFX", + "SDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDIV" + ], + [ + "SDIVR" + ], + [ + "MOVPRFX", + "SDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SDIV" + ], + [ + "MOVPRFX", + "SDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDIV" + ], + [ + "MOVPRFX", + "SDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDIV" + ], + [ + "SDIVR" + ], + [ + "MOVPRFX", + "SDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SDIV" + ], + [ + "MOVPRFX", + "SDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDIV" + ], + [ + "MOVPRFX", + "UDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDIV" + ], + [ + "UDIVR" + ], + [ + "MOVPRFX", + "UDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UDIV" + ], + [ + "MOVPRFX", + "UDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDIV" + ], + [ + "MOVPRFX", + "UDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDIV" + ], + [ + "UDIVR" + ], + [ + "MOVPRFX", + "UDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UDIV" + ], + [ + "MOVPRFX", + "UDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDIV" + ], + [ + "MOVPRFX", + "SDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDIV" + ], + [ + "SDIVR" + ], + [ + "MOVPRFX", + "SDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SDIV" + ], + [ + "MOVPRFX", + "SDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDIV" + ], + [ + "MOVPRFX", + "SDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDIV" + ], + [ + "SDIVR" + ], + [ + "MOVPRFX", + "SDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SDIV" + ], + [ + "MOVPRFX", + "SDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDIV" + ], + [ + "MOVPRFX", + "UDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDIV" + ], + [ + "UDIVR" + ], + [ + "MOVPRFX", + "UDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UDIV" + ], + [ + "MOVPRFX", + "UDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDIV" + ], + [ + "MOVPRFX", + "UDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDIV" + ], + [ + "UDIVR" + ], + [ + "MOVPRFX", + "UDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdiv[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UDIV" + ], + [ + "MOVPRFX", + "UDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIVR" + ], + [ + "MOVPRFX", + "FDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIVR" + ], + [ + "FDIV" + ], + [ + "MOVPRFX", + "FDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FDIVR" + ], + [ + "MOVPRFX", + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIVR" + ], + [ + "MOVPRFX", + "FDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIVR" + ], + [ + "FDIV" + ], + [ + "MOVPRFX", + "FDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FDIVR" + ], + [ + "MOVPRFX", + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIVR" + ], + [ + "MOVPRFX", + "FDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIVR" + ], + [ + "FDIV" + ], + [ + "MOVPRFX", + "FDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FDIVR" + ], + [ + "MOVPRFX", + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIVR" + ], + [ + "MOVPRFX", + "FDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIVR" + ], + [ + "FDIV" + ], + [ + "MOVPRFX", + "FDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FDIVR" + ], + [ + "MOVPRFX", + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIVR" + ], + [ + "MOVPRFX", + "FDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIVR" + ], + [ + "FDIV" + ], + [ + "MOVPRFX", + "FDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FDIVR" + ], + [ + "MOVPRFX", + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIVR" + ], + [ + "MOVPRFX", + "FDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDIVR" + ], + [ + "FDIV" + ], + [ + "MOVPRFX", + "FDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FDIVR" + ], + [ + "MOVPRFX", + "FDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDIVR" + ], + [ + "MOVPRFX", + "SDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDIVR" + ], + [ + "SDIV" + ], + [ + "MOVPRFX", + "SDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SDIVR" + ], + [ + "MOVPRFX", + "SDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDIVR" + ], + [ + "MOVPRFX", + "SDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDIVR" + ], + [ + "SDIV" + ], + [ + "MOVPRFX", + "SDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SDIVR" + ], + [ + "MOVPRFX", + "SDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDIVR" + ], + [ + "MOVPRFX", + "UDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDIVR" + ], + [ + "UDIV" + ], + [ + "MOVPRFX", + "UDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UDIVR" + ], + [ + "MOVPRFX", + "UDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDIVR" + ], + [ + "MOVPRFX", + "UDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDIVR" + ], + [ + "UDIV" + ], + [ + "MOVPRFX", + "UDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UDIVR" + ], + [ + "MOVPRFX", + "UDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDIVR" + ], + [ + "MOVPRFX", + "SDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDIVR" + ], + [ + "SDIV" + ], + [ + "MOVPRFX", + "SDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SDIVR" + ], + [ + "MOVPRFX", + "SDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDIVR" + ], + [ + "MOVPRFX", + "SDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDIVR" + ], + [ + "SDIV" + ], + [ + "MOVPRFX", + "SDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SDIVR" + ], + [ + "MOVPRFX", + "SDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDIVR" + ], + [ + "MOVPRFX", + "UDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDIVR" + ], + [ + "UDIV" + ], + [ + "MOVPRFX", + "UDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UDIVR" + ], + [ + "MOVPRFX", + "UDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDIVR" + ], + [ + "MOVPRFX", + "UDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDIVR" + ], + [ + "UDIV" + ], + [ + "MOVPRFX", + "UDIVR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdivr[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UDIVR" + ], + [ + "MOVPRFX", + "UDIV" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svdot[_f32_f16]", + "arguments": [ + "svfloat32_t zda", + "svfloat16_t zn", + "svfloat16_t zm" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "zda": { + "register": "Zreg1.S" + }, + "zm": { + "register": "Zreg3.H" + }, + "zn": { + "register": "Zreg2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdot[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDOT" + ], + [ + "MOVPRFX", + "SDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdot[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDOT" + ], + [ + "MOVPRFX", + "SDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdot[_n_u32]", + "arguments": [ + "svuint32_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDOT" + ], + [ + "MOVPRFX", + "UDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdot[_n_u64]", + "arguments": [ + "svuint64_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDOT" + ], + [ + "MOVPRFX", + "UDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdot[_s32]", + "arguments": [ + "svint32_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDOT" + ], + [ + "MOVPRFX", + "SDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svdot[_s32_s16]", + "arguments": [ + "svint32_t zda", + "svint16_t zn", + "svint16_t zm" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "zda": { + "register": "Zreg1.S" + }, + "zm": { + "register": "Zreg3.H" + }, + "zn": { + "register": "Zreg2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdot[_s64]", + "arguments": [ + "svint64_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDOT" + ], + [ + "MOVPRFX", + "SDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdot[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDOT" + ], + [ + "MOVPRFX", + "UDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svdot[_u32_u16]", + "arguments": [ + "svuint32_t zda", + "svuint16_t zn", + "svuint16_t zm" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "zda": { + "register": "Zreg1.S" + }, + "zm": { + "register": "Zreg3.H" + }, + "zn": { + "register": "Zreg2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdot[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDOT" + ], + [ + "MOVPRFX", + "UDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svdot_lane[_f32_f16]", + "arguments": [ + "svfloat32_t zda", + "svfloat16_t zn", + "svfloat16_t zm", + "uint64_t imm_idx" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "imm_idx": { + "immediate": "imm1" + }, + "zda": { + "register": "Zreg1.S" + }, + "zm": { + "register": "Zreg3.H" + }, + "zn": { + "register": "Zreg2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdot_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint8_t op2", + "svint8_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDOT" + ], + [ + "MOVPRFX", + "SDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svdot_lane[_s32_s16]", + "arguments": [ + "svint32_t zda", + "svint16_t zn", + "svint16_t zm", + "uint64_t imm_idx" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_idx": { + "immediate": "imm1" + }, + "zda": { + "register": "Zreg1.S" + }, + "zm": { + "register": "Zreg3.H" + }, + "zn": { + "register": "Zreg2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdot_lane[_s64]", + "arguments": [ + "svint64_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SDOT" + ], + [ + "MOVPRFX", + "SDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdot_lane[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint8_t op2", + "svuint8_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDOT" + ], + [ + "MOVPRFX", + "UDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svdot_lane[_u32_u16]", + "arguments": [ + "svuint32_t zda", + "svuint16_t zn", + "svuint16_t zm", + "uint64_t imm_idx" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm_idx": { + "immediate": "imm1" + }, + "zda": { + "register": "Zreg1.S" + }, + "zm": { + "register": "Zreg3.H" + }, + "zn": { + "register": "Zreg2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdot_lane[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint16_t op2", + "svuint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UDOT" + ], + [ + "MOVPRFX", + "UDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_b16", + "arguments": [ + "bool op" + ], + "return_type": { + "value": "svbool_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_b32", + "arguments": [ + "bool op" + ], + "return_type": { + "value": "svbool_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_b64", + "arguments": [ + "bool op" + ], + "return_type": { + "value": "svbool_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_b8", + "arguments": [ + "bool op" + ], + "return_type": { + "value": "svbool_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_f16", + "arguments": [ + "float16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Hop|Wop" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "FDUP" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_f16_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "float16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Ztied.H" + }, + "op": { + "register": "Hop|Wop" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "FCPY" + ], + [ + "CPY" + ], + [ + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_f16_x", + "arguments": [ + "svbool_t pg", + "float16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Hop|Wop" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP" + ], + [ + "FCPY" + ], + [ + "FDUP" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_f16_z", + "arguments": [ + "svbool_t pg", + "float16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Hop|Wop" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP", + "FCPY" + ], + [ + "DUP", + "CPY" + ], + [ + "MOVPRFX", + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_f32", + "arguments": [ + "float32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Sop|Wop" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "FDUP" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_f32_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "float32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Ztied.S" + }, + "op": { + "register": "Sop|Wop" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "FCPY" + ], + [ + "CPY" + ], + [ + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_f32_x", + "arguments": [ + "svbool_t pg", + "float32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Sop|Wop" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP" + ], + [ + "FCPY" + ], + [ + "FDUP" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_f32_z", + "arguments": [ + "svbool_t pg", + "float32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Sop|Wop" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP", + "FCPY" + ], + [ + "DUP", + "CPY" + ], + [ + "MOVPRFX", + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_f64", + "arguments": [ + "float64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Dop|Xop" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "FDUP" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_f64_m", + "arguments": [ + "svfloat64_t inactive", + "svbool_t pg", + "float64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Ztied.D" + }, + "op": { + "register": "Dop|Xop" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "FCPY" + ], + [ + "CPY" + ], + [ + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_f64_x", + "arguments": [ + "svbool_t pg", + "float64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Dop|Xop" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP" + ], + [ + "FCPY" + ], + [ + "FDUP" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_f64_z", + "arguments": [ + "svbool_t pg", + "float64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Dop|Xop" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP", + "FCPY" + ], + [ + "DUP", + "CPY" + ], + [ + "MOVPRFX", + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_s16", + "arguments": [ + "int16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Hop|Wop" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "FDUP" + ], + [ + "DUPM" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_s16_m", + "arguments": [ + "svint16_t inactive", + "svbool_t pg", + "int16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Ztied.H" + }, + "op": { + "register": "Hop|Wop" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "FCPY" + ], + [ + "CPY" + ], + [ + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_s16_x", + "arguments": [ + "svbool_t pg", + "int16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Hop|Wop" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP" + ], + [ + "FCPY" + ], + [ + "FDUP" + ], + [ + "DUPM" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_s16_z", + "arguments": [ + "svbool_t pg", + "int16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Hop|Wop" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP", + "FCPY" + ], + [ + "DUP", + "CPY" + ], + [ + "MOVPRFX", + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_s32", + "arguments": [ + "int32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Sop|Wop" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "FDUP" + ], + [ + "DUPM" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_s32_m", + "arguments": [ + "svint32_t inactive", + "svbool_t pg", + "int32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Ztied.S" + }, + "op": { + "register": "Sop|Wop" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "FCPY" + ], + [ + "CPY" + ], + [ + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_s32_x", + "arguments": [ + "svbool_t pg", + "int32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Sop|Wop" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP" + ], + [ + "FCPY" + ], + [ + "FDUP" + ], + [ + "DUPM" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_s32_z", + "arguments": [ + "svbool_t pg", + "int32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Sop|Wop" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP", + "FCPY" + ], + [ + "DUP", + "CPY" + ], + [ + "MOVPRFX", + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_s64", + "arguments": [ + "int64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Dop|Xop" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "FDUP" + ], + [ + "DUPM" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_s64_m", + "arguments": [ + "svint64_t inactive", + "svbool_t pg", + "int64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Ztied.D" + }, + "op": { + "register": "Dop|Xop" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "FCPY" + ], + [ + "CPY" + ], + [ + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_s64_x", + "arguments": [ + "svbool_t pg", + "int64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Dop|Xop" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP" + ], + [ + "FCPY" + ], + [ + "FDUP" + ], + [ + "DUPM" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_s64_z", + "arguments": [ + "svbool_t pg", + "int64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Dop|Xop" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP", + "FCPY" + ], + [ + "DUP", + "CPY" + ], + [ + "MOVPRFX", + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_s8", + "arguments": [ + "int8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Bop|Wop" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "FDUP" + ], + [ + "DUPM" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_s8_m", + "arguments": [ + "svint8_t inactive", + "svbool_t pg", + "int8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Ztied.B" + }, + "op": { + "register": "Bop|Wop" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "FCPY" + ], + [ + "CPY" + ], + [ + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_s8_x", + "arguments": [ + "svbool_t pg", + "int8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Bop|Wop" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP" + ], + [ + "FCPY" + ], + [ + "FDUP" + ], + [ + "DUPM" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_s8_z", + "arguments": [ + "svbool_t pg", + "int8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Bop|Wop" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP", + "FCPY" + ], + [ + "DUP", + "CPY" + ], + [ + "MOVPRFX", + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_u16", + "arguments": [ + "uint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Hop|Wop" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "FDUP" + ], + [ + "DUPM" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_u16_m", + "arguments": [ + "svuint16_t inactive", + "svbool_t pg", + "uint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Ztied.H" + }, + "op": { + "register": "Hop|Wop" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "FCPY" + ], + [ + "CPY" + ], + [ + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_u16_x", + "arguments": [ + "svbool_t pg", + "uint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Hop|Wop" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP" + ], + [ + "FCPY" + ], + [ + "FDUP" + ], + [ + "DUPM" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_u16_z", + "arguments": [ + "svbool_t pg", + "uint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Hop|Wop" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP", + "FCPY" + ], + [ + "DUP", + "CPY" + ], + [ + "MOVPRFX", + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_u32", + "arguments": [ + "uint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Sop|Wop" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "FDUP" + ], + [ + "DUPM" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_u32_m", + "arguments": [ + "svuint32_t inactive", + "svbool_t pg", + "uint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Ztied.S" + }, + "op": { + "register": "Sop|Wop" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "FCPY" + ], + [ + "CPY" + ], + [ + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_u32_x", + "arguments": [ + "svbool_t pg", + "uint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Sop|Wop" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP" + ], + [ + "FCPY" + ], + [ + "FDUP" + ], + [ + "DUPM" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_u32_z", + "arguments": [ + "svbool_t pg", + "uint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Sop|Wop" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP", + "FCPY" + ], + [ + "DUP", + "CPY" + ], + [ + "MOVPRFX", + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_u64", + "arguments": [ + "uint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Dop|Xop" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "FDUP" + ], + [ + "DUPM" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_u64_m", + "arguments": [ + "svuint64_t inactive", + "svbool_t pg", + "uint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Ztied.D" + }, + "op": { + "register": "Dop|Xop" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "FCPY" + ], + [ + "CPY" + ], + [ + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_u64_x", + "arguments": [ + "svbool_t pg", + "uint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Dop|Xop" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP" + ], + [ + "FCPY" + ], + [ + "FDUP" + ], + [ + "DUPM" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_u64_z", + "arguments": [ + "svbool_t pg", + "uint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Dop|Xop" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP", + "FCPY" + ], + [ + "DUP", + "CPY" + ], + [ + "MOVPRFX", + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_u8", + "arguments": [ + "uint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Bop|Wop" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "FDUP" + ], + [ + "DUPM" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_u8_m", + "arguments": [ + "svuint8_t inactive", + "svbool_t pg", + "uint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Ztied.B" + }, + "op": { + "register": "Bop|Wop" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "FCPY" + ], + [ + "CPY" + ], + [ + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_u8_x", + "arguments": [ + "svbool_t pg", + "uint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Bop|Wop" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP" + ], + [ + "FCPY" + ], + [ + "FDUP" + ], + [ + "DUPM" + ], + [ + "DUP" + ], + [ + "DUP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup[_n]_u8_z", + "arguments": [ + "svbool_t pg", + "uint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Bop|Wop" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CPY" + ], + [ + "DUP", + "FCPY" + ], + [ + "DUP", + "CPY" + ], + [ + "MOVPRFX", + "CPY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup_lane[_f16]", + "arguments": [ + "svfloat16_t data", + "uint16_t index" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.H" + }, + "index": { + "register": "Zindex.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup_lane[_f32]", + "arguments": [ + "svfloat32_t data", + "uint32_t index" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.S" + }, + "index": { + "register": "Zindex.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup_lane[_f64]", + "arguments": [ + "svfloat64_t data", + "uint64_t index" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D" + }, + "index": { + "register": "Zindex.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup_lane[_s16]", + "arguments": [ + "svint16_t data", + "uint16_t index" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.H" + }, + "index": { + "register": "Zindex.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup_lane[_s32]", + "arguments": [ + "svint32_t data", + "uint32_t index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.S" + }, + "index": { + "register": "Zindex.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup_lane[_s64]", + "arguments": [ + "svint64_t data", + "uint64_t index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D" + }, + "index": { + "register": "Zindex.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup_lane[_s8]", + "arguments": [ + "svint8_t data", + "uint8_t index" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.B" + }, + "index": { + "register": "Zindex.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup_lane[_u16]", + "arguments": [ + "svuint16_t data", + "uint16_t index" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.H" + }, + "index": { + "register": "Zindex.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup_lane[_u32]", + "arguments": [ + "svuint32_t data", + "uint32_t index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.S" + }, + "index": { + "register": "Zindex.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup_lane[_u64]", + "arguments": [ + "svuint64_t data", + "uint64_t index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D" + }, + "index": { + "register": "Zindex.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdup_lane[_u8]", + "arguments": [ + "svuint8_t data", + "uint8_t index" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.B" + }, + "index": { + "register": "Zindex.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq[_n]_b16", + "arguments": [ + "bool x0", + "bool x1", + "bool x2", + "bool x3", + "bool x4", + "bool x5", + "bool x6", + "bool x7" + ], + "return_type": { + "value": "svbool_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq[_n]_b32", + "arguments": [ + "bool x0", + "bool x1", + "bool x2", + "bool x3" + ], + "return_type": { + "value": "svbool_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq[_n]_b64", + "arguments": [ + "bool x0", + "bool x1" + ], + "return_type": { + "value": "svbool_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq[_n]_b8", + "arguments": [ + "bool x0", + "bool x1", + "bool x2", + "bool x3", + "bool x4", + "bool x5", + "bool x6", + "bool x7", + "bool x8", + "bool x9", + "bool x10", + "bool x11", + "bool x12", + "bool x13", + "bool x14", + "bool x15" + ], + "return_type": { + "value": "svbool_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq[_n]_f16", + "arguments": [ + "float16_t x0", + "float16_t x1", + "float16_t x2", + "float16_t x3", + "float16_t x4", + "float16_t x5", + "float16_t x6", + "float16_t x7" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq[_n]_f32", + "arguments": [ + "float32_t x0", + "float32_t x1", + "float32_t x2", + "float32_t x3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq[_n]_f64", + "arguments": [ + "float64_t x0", + "float64_t x1" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq[_n]_s16", + "arguments": [ + "int16_t x0", + "int16_t x1", + "int16_t x2", + "int16_t x3", + "int16_t x4", + "int16_t x5", + "int16_t x6", + "int16_t x7" + ], + "return_type": { + "value": "svint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq[_n]_s32", + "arguments": [ + "int32_t x0", + "int32_t x1", + "int32_t x2", + "int32_t x3" + ], + "return_type": { + "value": "svint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq[_n]_s64", + "arguments": [ + "int64_t x0", + "int64_t x1" + ], + "return_type": { + "value": "svint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq[_n]_s8", + "arguments": [ + "int8_t x0", + "int8_t x1", + "int8_t x2", + "int8_t x3", + "int8_t x4", + "int8_t x5", + "int8_t x6", + "int8_t x7", + "int8_t x8", + "int8_t x9", + "int8_t x10", + "int8_t x11", + "int8_t x12", + "int8_t x13", + "int8_t x14", + "int8_t x15" + ], + "return_type": { + "value": "svint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq[_n]_u16", + "arguments": [ + "uint16_t x0", + "uint16_t x1", + "uint16_t x2", + "uint16_t x3", + "uint16_t x4", + "uint16_t x5", + "uint16_t x6", + "uint16_t x7" + ], + "return_type": { + "value": "svuint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq[_n]_u32", + "arguments": [ + "uint32_t x0", + "uint32_t x1", + "uint32_t x2", + "uint32_t x3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq[_n]_u64", + "arguments": [ + "uint64_t x0", + "uint64_t x1" + ], + "return_type": { + "value": "svuint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq[_n]_u8", + "arguments": [ + "uint8_t x0", + "uint8_t x1", + "uint8_t x2", + "uint8_t x3", + "uint8_t x4", + "uint8_t x5", + "uint8_t x6", + "uint8_t x7", + "uint8_t x8", + "uint8_t x9", + "uint8_t x10", + "uint8_t x11", + "uint8_t x12", + "uint8_t x13", + "uint8_t x14", + "uint8_t x15" + ], + "return_type": { + "value": "svuint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq_lane[_f16]", + "arguments": [ + "svfloat16_t data", + "uint64_t index" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D|Zdata.Q" + }, + "{2 * index, 2 * index + 1, 2 * index, 2 * index + 1, …}": { + "register": "Zindices_d.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq_lane[_f32]", + "arguments": [ + "svfloat32_t data", + "uint64_t index" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D|Zdata.Q" + }, + "{2 * index, 2 * index + 1, 2 * index, 2 * index + 1, …}": { + "register": "Zindices_d.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq_lane[_f64]", + "arguments": [ + "svfloat64_t data", + "uint64_t index" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D|Zdata.Q" + }, + "{2 * index, 2 * index + 1, 2 * index, 2 * index + 1, …}": { + "register": "Zindices_d.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq_lane[_s16]", + "arguments": [ + "svint16_t data", + "uint64_t index" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D|Zdata.Q" + }, + "{2 * index, 2 * index + 1, 2 * index, 2 * index + 1, …}": { + "register": "Zindices_d.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq_lane[_s32]", + "arguments": [ + "svint32_t data", + "uint64_t index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D|Zdata.Q" + }, + "{2 * index, 2 * index + 1, 2 * index, 2 * index + 1, …}": { + "register": "Zindices_d.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq_lane[_s64]", + "arguments": [ + "svint64_t data", + "uint64_t index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D|Zdata.Q" + }, + "{2 * index, 2 * index + 1, 2 * index, 2 * index + 1, …}": { + "register": "Zindices_d.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq_lane[_s8]", + "arguments": [ + "svint8_t data", + "uint64_t index" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D|Zdata.Q" + }, + "{2 * index, 2 * index + 1, 2 * index, 2 * index + 1, …}": { + "register": "Zindices_d.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq_lane[_u16]", + "arguments": [ + "svuint16_t data", + "uint64_t index" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D|Zdata.Q" + }, + "{2 * index, 2 * index + 1, 2 * index, 2 * index + 1, …}": { + "register": "Zindices_d.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq_lane[_u32]", + "arguments": [ + "svuint32_t data", + "uint64_t index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D|Zdata.Q" + }, + "{2 * index, 2 * index + 1, 2 * index, 2 * index + 1, …}": { + "register": "Zindices_d.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq_lane[_u64]", + "arguments": [ + "svuint64_t data", + "uint64_t index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D|Zdata.Q" + }, + "{2 * index, 2 * index + 1, 2 * index, 2 * index + 1, …}": { + "register": "Zindices_d.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svdupq_lane[_u8]", + "arguments": [ + "svuint8_t data", + "uint64_t index" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D|Zdata.Q" + }, + "{2 * index, 2 * index + 1, 2 * index, 2 * index + 1, …}": { + "register": "Zindices_d.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "DUP" + ], + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveor3[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H[*]|Ztied3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "MOVPRFX", + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveor3[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S[*]|Ztied3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "MOVPRFX", + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveor3[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "int64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D[*]|Ztied3.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "MOVPRFX", + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveor3[_n_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B[*]|Ztied3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "MOVPRFX", + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveor3[_n_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H[*]|Ztied3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "MOVPRFX", + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveor3[_n_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S[*]|Ztied3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "MOVPRFX", + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveor3[_n_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D[*]|Ztied3.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "MOVPRFX", + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveor3[_n_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B[*]|Ztied3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "MOVPRFX", + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveor3[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H|Ztied3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "MOVPRFX", + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveor3[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S|Ztied3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "MOVPRFX", + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveor3[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D|Ztied3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "MOVPRFX", + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveor3[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B|Ztied3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "MOVPRFX", + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveor3[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H|Ztied3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "MOVPRFX", + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveor3[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S|Ztied3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "MOVPRFX", + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveor3[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D|Ztied3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "MOVPRFX", + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveor3[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B|Ztied3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "EOR3" + ], + [ + "MOVPRFX", + "EOR3" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_b]_z", + "arguments": [ + "svbool_t pg", + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.B" + }, + "op2": { + "register": "Pop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ], + [ + "EOR" + ], + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveor[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "EOR" + ], + [ + "MOVPRFX", + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveorbt[_n_s16]", + "arguments": [ + "svint16_t odd", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "odd": { + "register": "Zodd.H|Ztied.H" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORBT" + ], + [ + "MOVPRFX", + "EORBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveorbt[_n_s32]", + "arguments": [ + "svint32_t odd", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "odd": { + "register": "Zodd.S|Ztied.S" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORBT" + ], + [ + "MOVPRFX", + "EORBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveorbt[_n_s64]", + "arguments": [ + "svint64_t odd", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "odd": { + "register": "Zodd.D|Ztied.D" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORBT" + ], + [ + "MOVPRFX", + "EORBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveorbt[_n_s8]", + "arguments": [ + "svint8_t odd", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "odd": { + "register": "Zodd.B|Ztied.B" + }, + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORBT" + ], + [ + "MOVPRFX", + "EORBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveorbt[_n_u16]", + "arguments": [ + "svuint16_t odd", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "odd": { + "register": "Zodd.H|Ztied.H" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORBT" + ], + [ + "MOVPRFX", + "EORBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveorbt[_n_u32]", + "arguments": [ + "svuint32_t odd", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "odd": { + "register": "Zodd.S|Ztied.S" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORBT" + ], + [ + "MOVPRFX", + "EORBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveorbt[_n_u64]", + "arguments": [ + "svuint64_t odd", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "odd": { + "register": "Zodd.D|Ztied.D" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORBT" + ], + [ + "MOVPRFX", + "EORBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveorbt[_n_u8]", + "arguments": [ + "svuint8_t odd", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "odd": { + "register": "Zodd.B|Ztied.B" + }, + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORBT" + ], + [ + "MOVPRFX", + "EORBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveorbt[_s16]", + "arguments": [ + "svint16_t odd", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "odd": { + "register": "Zodd.H|Ztied.H" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORBT" + ], + [ + "MOVPRFX", + "EORBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveorbt[_s32]", + "arguments": [ + "svint32_t odd", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "odd": { + "register": "Zodd.S|Ztied.S" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORBT" + ], + [ + "MOVPRFX", + "EORBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveorbt[_s64]", + "arguments": [ + "svint64_t odd", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "odd": { + "register": "Zodd.D|Ztied.D" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORBT" + ], + [ + "MOVPRFX", + "EORBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveorbt[_s8]", + "arguments": [ + "svint8_t odd", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "odd": { + "register": "Zodd.B|Ztied.B" + }, + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORBT" + ], + [ + "MOVPRFX", + "EORBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveorbt[_u16]", + "arguments": [ + "svuint16_t odd", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "odd": { + "register": "Zodd.H|Ztied.H" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORBT" + ], + [ + "MOVPRFX", + "EORBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveorbt[_u32]", + "arguments": [ + "svuint32_t odd", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "odd": { + "register": "Zodd.S|Ztied.S" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORBT" + ], + [ + "MOVPRFX", + "EORBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveorbt[_u64]", + "arguments": [ + "svuint64_t odd", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "odd": { + "register": "Zodd.D|Ztied.D" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORBT" + ], + [ + "MOVPRFX", + "EORBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveorbt[_u8]", + "arguments": [ + "svuint8_t odd", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "odd": { + "register": "Zodd.B|Ztied.B" + }, + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORBT" + ], + [ + "MOVPRFX", + "EORBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveortb[_n_s16]", + "arguments": [ + "svint16_t even", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Zeven.H|Ztied.H" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORTB" + ], + [ + "MOVPRFX", + "EORTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveortb[_n_s32]", + "arguments": [ + "svint32_t even", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Zeven.S|Ztied.S" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORTB" + ], + [ + "MOVPRFX", + "EORTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveortb[_n_s64]", + "arguments": [ + "svint64_t even", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Zeven.D|Ztied.D" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORTB" + ], + [ + "MOVPRFX", + "EORTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveortb[_n_s8]", + "arguments": [ + "svint8_t even", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Zeven.B|Ztied.B" + }, + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORTB" + ], + [ + "MOVPRFX", + "EORTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveortb[_n_u16]", + "arguments": [ + "svuint16_t even", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Zeven.H|Ztied.H" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORTB" + ], + [ + "MOVPRFX", + "EORTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveortb[_n_u32]", + "arguments": [ + "svuint32_t even", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Zeven.S|Ztied.S" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORTB" + ], + [ + "MOVPRFX", + "EORTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveortb[_n_u64]", + "arguments": [ + "svuint64_t even", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Zeven.D|Ztied.D" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORTB" + ], + [ + "MOVPRFX", + "EORTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveortb[_n_u8]", + "arguments": [ + "svuint8_t even", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Zeven.B|Ztied.B" + }, + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORTB" + ], + [ + "MOVPRFX", + "EORTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveortb[_s16]", + "arguments": [ + "svint16_t even", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Zeven.H|Ztied.H" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORTB" + ], + [ + "MOVPRFX", + "EORTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveortb[_s32]", + "arguments": [ + "svint32_t even", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Zeven.S|Ztied.S" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORTB" + ], + [ + "MOVPRFX", + "EORTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveortb[_s64]", + "arguments": [ + "svint64_t even", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Zeven.D|Ztied.D" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORTB" + ], + [ + "MOVPRFX", + "EORTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveortb[_s8]", + "arguments": [ + "svint8_t even", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Zeven.B|Ztied.B" + }, + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORTB" + ], + [ + "MOVPRFX", + "EORTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveortb[_u16]", + "arguments": [ + "svuint16_t even", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Zeven.H|Ztied.H" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORTB" + ], + [ + "MOVPRFX", + "EORTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveortb[_u32]", + "arguments": [ + "svuint32_t even", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Zeven.S|Ztied.S" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORTB" + ], + [ + "MOVPRFX", + "EORTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveortb[_u64]", + "arguments": [ + "svuint64_t even", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Zeven.D|Ztied.D" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORTB" + ], + [ + "MOVPRFX", + "EORTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "sveortb[_u8]", + "arguments": [ + "svuint8_t even", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Zeven.B|Ztied.B" + }, + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORTB" + ], + [ + "MOVPRFX", + "EORTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveorv[_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "int16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveorv[_s32]", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveorv[_s64]", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveorv[_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "int8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveorv[_u16]", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveorv[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveorv[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "sveorv[_u8]", + "arguments": [ + "svbool_t pg", + "svuint8_t op" + ], + "return_type": { + "value": "uint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EORV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svexpa[_f16]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FEXPA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svexpa[_f32]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FEXPA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svexpa[_f64]", + "arguments": [ + "svuint64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FEXPA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svext[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 127 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EXT" + ], + [ + "MOVPRFX", + "EXT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svext[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 63 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EXT" + ], + [ + "MOVPRFX", + "EXT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svext[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 31 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EXT" + ], + [ + "MOVPRFX", + "EXT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svext[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 127 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EXT" + ], + [ + "MOVPRFX", + "EXT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svext[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 63 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EXT" + ], + [ + "MOVPRFX", + "EXT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svext[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 31 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EXT" + ], + [ + "MOVPRFX", + "EXT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svext[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 255 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EXT" + ], + [ + "MOVPRFX", + "EXT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svext[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 127 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EXT" + ], + [ + "MOVPRFX", + "EXT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svext[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 63 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EXT" + ], + [ + "MOVPRFX", + "EXT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svext[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 31 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EXT" + ], + [ + "MOVPRFX", + "EXT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svext[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 255 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EXT" + ], + [ + "MOVPRFX", + "EXT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextb[_s16]_m", + "arguments": [ + "svint16_t inactive", + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SXTB" + ], + [ + "MOVPRFX", + "SXTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextb[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SXTB" + ], + [ + "MOVPRFX", + "SXTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextb[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SXTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextb[_s32]_m", + "arguments": [ + "svint32_t inactive", + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SXTB" + ], + [ + "MOVPRFX", + "SXTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextb[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SXTB" + ], + [ + "MOVPRFX", + "SXTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextb[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SXTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextb[_s64]_m", + "arguments": [ + "svint64_t inactive", + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SXTB" + ], + [ + "MOVPRFX", + "SXTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextb[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SXTB" + ], + [ + "MOVPRFX", + "SXTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextb[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SXTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextb[_u16]_m", + "arguments": [ + "svuint16_t inactive", + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UXTB" + ], + [ + "MOVPRFX", + "UXTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextb[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UXTB" + ], + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextb[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UXTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextb[_u32]_m", + "arguments": [ + "svuint32_t inactive", + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UXTB" + ], + [ + "MOVPRFX", + "UXTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextb[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UXTB" + ], + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextb[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UXTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextb[_u64]_m", + "arguments": [ + "svuint64_t inactive", + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UXTB" + ], + [ + "MOVPRFX", + "UXTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextb[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UXTB" + ], + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextb[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UXTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svexth[_s32]_m", + "arguments": [ + "svint32_t inactive", + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SXTH" + ], + [ + "MOVPRFX", + "SXTH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svexth[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SXTH" + ], + [ + "MOVPRFX", + "SXTH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svexth[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SXTH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svexth[_s64]_m", + "arguments": [ + "svint64_t inactive", + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SXTH" + ], + [ + "MOVPRFX", + "SXTH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svexth[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SXTH" + ], + [ + "MOVPRFX", + "SXTH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svexth[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SXTH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svexth[_u32]_m", + "arguments": [ + "svuint32_t inactive", + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UXTH" + ], + [ + "MOVPRFX", + "UXTH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svexth[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UXTH" + ], + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svexth[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UXTH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svexth[_u64]_m", + "arguments": [ + "svuint64_t inactive", + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UXTH" + ], + [ + "MOVPRFX", + "UXTH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svexth[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UXTH" + ], + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svexth[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UXTH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextw[_s64]_m", + "arguments": [ + "svint64_t inactive", + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SXTW" + ], + [ + "MOVPRFX", + "SXTW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextw[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SXTW" + ], + [ + "MOVPRFX", + "SXTW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextw[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SXTW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextw[_u64]_m", + "arguments": [ + "svuint64_t inactive", + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UXTW" + ], + [ + "MOVPRFX", + "UXTW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextw[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UXTW" + ], + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svextw[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UXTW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svget2[_b]", + "arguments": [ + "svboolx2_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svbool_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget2[_f16]", + "arguments": [ + "svfloat16x2_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget2[_f32]", + "arguments": [ + "svfloat32x2_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget2[_f64]", + "arguments": [ + "svfloat64x2_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget2[_s16]", + "arguments": [ + "svint16x2_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget2[_s32]", + "arguments": [ + "svint32x2_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget2[_s64]", + "arguments": [ + "svint64x2_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget2[_s8]", + "arguments": [ + "svint8x2_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget2[_u16]", + "arguments": [ + "svuint16x2_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget2[_u32]", + "arguments": [ + "svuint32x2_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget2[_u64]", + "arguments": [ + "svuint64x2_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget2[_u8]", + "arguments": [ + "svuint8x2_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget3[_f16]", + "arguments": [ + "svfloat16x3_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget3[_f32]", + "arguments": [ + "svfloat32x3_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget3[_f64]", + "arguments": [ + "svfloat64x3_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget3[_s16]", + "arguments": [ + "svint16x3_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget3[_s32]", + "arguments": [ + "svint32x3_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget3[_s64]", + "arguments": [ + "svint64x3_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget3[_s8]", + "arguments": [ + "svint8x3_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget3[_u16]", + "arguments": [ + "svuint16x3_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget3[_u32]", + "arguments": [ + "svuint32x3_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget3[_u64]", + "arguments": [ + "svuint64x3_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget3[_u8]", + "arguments": [ + "svuint8x3_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svget4[_b]", + "arguments": [ + "svboolx4_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svbool_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget4[_f16]", + "arguments": [ + "svfloat16x4_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget4[_f32]", + "arguments": [ + "svfloat32x4_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget4[_f64]", + "arguments": [ + "svfloat64x4_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget4[_s16]", + "arguments": [ + "svint16x4_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget4[_s32]", + "arguments": [ + "svint32x4_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget4[_s64]", + "arguments": [ + "svint64x4_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget4[_s8]", + "arguments": [ + "svint8x4_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget4[_u16]", + "arguments": [ + "svuint16x4_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget4[_u32]", + "arguments": [ + "svuint32x4_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget4[_u64]", + "arguments": [ + "svuint64x4_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svget4[_u8]", + "arguments": [ + "svuint8x4_t tuple", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHADD" + ], + [ + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHADD" + ], + [ + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHADD" + ], + [ + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHADD" + ], + [ + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHADD" + ], + [ + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHADD" + ], + [ + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHADD" + ], + [ + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHADD" + ], + [ + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHADD" + ], + [ + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHADD" + ], + [ + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHADD" + ], + [ + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHADD" + ], + [ + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHADD" + ], + [ + "MOVPRFX", + "SHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHADD" + ], + [ + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHADD" + ], + [ + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHADD" + ], + [ + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHADD" + ], + [ + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhadd[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHADD" + ], + [ + "MOVPRFX", + "UHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhistcnt[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "HISTCNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhistcnt[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "HISTCNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhistcnt[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "HISTCNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhistcnt[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "HISTCNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhistseg[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "HISTSEG" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhistseg[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "HISTSEG" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUB" + ], + [ + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUB" + ], + [ + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUB" + ], + [ + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUB" + ], + [ + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUB" + ], + [ + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUB" + ], + [ + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUB" + ], + [ + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUB" + ], + [ + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUB" + ], + [ + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUB" + ], + [ + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUB" + ], + [ + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUB" + ], + [ + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUB" + ], + [ + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUB" + ], + [ + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUB" + ], + [ + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUB" + ], + [ + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsub[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUBR" + ], + [ + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUBR" + ], + [ + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUBR" + ], + [ + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUBR" + ], + [ + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUBR" + ], + [ + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUBR" + ], + [ + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUBR" + ], + [ + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUBR" + ], + [ + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUBR" + ], + [ + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUBR" + ], + [ + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUBR" + ], + [ + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHSUBR" + ], + [ + "SHSUB" + ], + [ + "MOVPRFX", + "SHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SHSUBR" + ], + [ + "MOVPRFX", + "SHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUBR" + ], + [ + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUBR" + ], + [ + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUBR" + ], + [ + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UHSUBR" + ], + [ + "UHSUB" + ], + [ + "MOVPRFX", + "UHSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svhsubr[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UHSUBR" + ], + [ + "MOVPRFX", + "UHSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svindex_s16", + "arguments": [ + "int16_t base", + "int16_t step" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Wbase" + }, + "step": { + "register": "Wstep" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INDEX" + ], + [ + "INDEX" + ], + [ + "INDEX" + ], + [ + "INDEX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svindex_s32", + "arguments": [ + "int32_t base", + "int32_t step" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Wbase" + }, + "step": { + "register": "Wstep" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INDEX" + ], + [ + "INDEX" + ], + [ + "INDEX" + ], + [ + "INDEX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svindex_s64", + "arguments": [ + "int64_t base", + "int64_t step" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "step": { + "register": "Xstep" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INDEX" + ], + [ + "INDEX" + ], + [ + "INDEX" + ], + [ + "INDEX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svindex_s8", + "arguments": [ + "int8_t base", + "int8_t step" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Wbase" + }, + "step": { + "register": "Wstep" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INDEX" + ], + [ + "INDEX" + ], + [ + "INDEX" + ], + [ + "INDEX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svindex_u16", + "arguments": [ + "uint16_t base", + "uint16_t step" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Wbase" + }, + "step": { + "register": "Wstep" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INDEX" + ], + [ + "INDEX" + ], + [ + "INDEX" + ], + [ + "INDEX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svindex_u32", + "arguments": [ + "uint32_t base", + "uint32_t step" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Wbase" + }, + "step": { + "register": "Wstep" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INDEX" + ], + [ + "INDEX" + ], + [ + "INDEX" + ], + [ + "INDEX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svindex_u64", + "arguments": [ + "uint64_t base", + "uint64_t step" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "step": { + "register": "Xstep" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INDEX" + ], + [ + "INDEX" + ], + [ + "INDEX" + ], + [ + "INDEX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svindex_u8", + "arguments": [ + "uint8_t base", + "uint8_t step" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Wbase" + }, + "step": { + "register": "Wstep" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INDEX" + ], + [ + "INDEX" + ], + [ + "INDEX" + ], + [ + "INDEX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svinsr[_n_f16]", + "arguments": [ + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Hop2|Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INSR" + ], + [ + "INSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svinsr[_n_f32]", + "arguments": [ + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Sop2|Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INSR" + ], + [ + "INSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svinsr[_n_f64]", + "arguments": [ + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Dop2|Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INSR" + ], + [ + "INSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svinsr[_n_s16]", + "arguments": [ + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Hop2|Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INSR" + ], + [ + "INSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svinsr[_n_s32]", + "arguments": [ + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Sop2|Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INSR" + ], + [ + "INSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svinsr[_n_s64]", + "arguments": [ + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Dop2|Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INSR" + ], + [ + "INSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svinsr[_n_s8]", + "arguments": [ + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Bop2|Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INSR" + ], + [ + "INSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svinsr[_n_u16]", + "arguments": [ + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Hop2|Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INSR" + ], + [ + "INSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svinsr[_n_u32]", + "arguments": [ + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Sop2|Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INSR" + ], + [ + "INSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svinsr[_n_u64]", + "arguments": [ + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Dop2|Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INSR" + ], + [ + "INSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svinsr[_n_u8]", + "arguments": [ + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Bop2|Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "INSR" + ], + [ + "INSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlasta[_f16]", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTA" + ], + [ + "LASTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlasta[_f32]", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "float32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTA" + ], + [ + "LASTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlasta[_f64]", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "float64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTA" + ], + [ + "LASTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlasta[_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "int16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTA" + ], + [ + "LASTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlasta[_s32]", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTA" + ], + [ + "LASTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlasta[_s64]", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTA" + ], + [ + "LASTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlasta[_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "int8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTA" + ], + [ + "LASTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlasta[_u16]", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTA" + ], + [ + "LASTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlasta[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTA" + ], + [ + "LASTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlasta[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTA" + ], + [ + "LASTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlasta[_u8]", + "arguments": [ + "svbool_t pg", + "svuint8_t op" + ], + "return_type": { + "value": "uint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTA" + ], + [ + "LASTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlastb[_f16]", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTB" + ], + [ + "LASTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlastb[_f32]", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "float32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTB" + ], + [ + "LASTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlastb[_f64]", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "float64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTB" + ], + [ + "LASTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlastb[_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "int16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTB" + ], + [ + "LASTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlastb[_s32]", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTB" + ], + [ + "LASTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlastb[_s64]", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTB" + ], + [ + "LASTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlastb[_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "int8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTB" + ], + [ + "LASTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlastb[_u16]", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTB" + ], + [ + "LASTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlastb[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTB" + ], + [ + "LASTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlastb[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTB" + ], + [ + "LASTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlastb[_u8]", + "arguments": [ + "svbool_t pg", + "svuint8_t op" + ], + "return_type": { + "value": "uint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LASTB" + ], + [ + "LASTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1[_f16]", + "arguments": [ + "svbool_t pg", + "const float16_t *base" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_f16]_x2", + "arguments": [ + "svcount_t png", + "float16_t const * rn" + ], + "return_type": { + "value": "svfloat16x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_f16]_x4", + "arguments": [ + "svcount_t png", + "float16_t const * rn" + ], + "return_type": { + "value": "svfloat16x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_f32]_x2", + "arguments": [ + "svcount_t png", + "float32_t const * rn" + ], + "return_type": { + "value": "svfloat32x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_f32]_x4", + "arguments": [ + "svcount_t png", + "float32_t const * rn" + ], + "return_type": { + "value": "svfloat32x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ], + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_f64]_x2", + "arguments": [ + "svcount_t png", + "float64_t const * rn" + ], + "return_type": { + "value": "svfloat64x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_f64]_x4", + "arguments": [ + "svcount_t png", + "float64_t const * rn" + ], + "return_type": { + "value": "svfloat64x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1[_s16]", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_s16]_x2", + "arguments": [ + "svcount_t png", + "int16_t const * rn" + ], + "return_type": { + "value": "svint16x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_s16]_x4", + "arguments": [ + "svcount_t png", + "int16_t const * rn" + ], + "return_type": { + "value": "svint16x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_s32]_x2", + "arguments": [ + "svcount_t png", + "int32_t const * rn" + ], + "return_type": { + "value": "svint32x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_s32]_x4", + "arguments": [ + "svcount_t png", + "int32_t const * rn" + ], + "return_type": { + "value": "svint32x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ], + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_s64]_x2", + "arguments": [ + "svcount_t png", + "int64_t const * rn" + ], + "return_type": { + "value": "svint64x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_s64]_x4", + "arguments": [ + "svcount_t png", + "int64_t const * rn" + ], + "return_type": { + "value": "svint64x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1[_s8]", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_s8]_x2", + "arguments": [ + "svcount_t png", + "int8_t const * rn" + ], + "return_type": { + "value": "svint8x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_s8]_x4", + "arguments": [ + "svcount_t png", + "int8_t const * rn" + ], + "return_type": { + "value": "svint8x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1[_u16]", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_u16]_x2", + "arguments": [ + "svcount_t png", + "uint16_t const * rn" + ], + "return_type": { + "value": "svuint16x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_u16]_x4", + "arguments": [ + "svcount_t png", + "uint16_t const * rn" + ], + "return_type": { + "value": "svuint16x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_u32]_x2", + "arguments": [ + "svcount_t png", + "uint32_t const * rn" + ], + "return_type": { + "value": "svuint32x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_u32]_x4", + "arguments": [ + "svcount_t png", + "uint32_t const * rn" + ], + "return_type": { + "value": "svuint32x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ], + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_u64]_x2", + "arguments": [ + "svcount_t png", + "uint64_t const * rn" + ], + "return_type": { + "value": "svuint64x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_u64]_x4", + "arguments": [ + "svcount_t png", + "uint64_t const * rn" + ], + "return_type": { + "value": "svuint64x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1[_u8]", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_u8]_x2", + "arguments": [ + "svcount_t png", + "uint8_t const * rn" + ], + "return_type": { + "value": "svuint8x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1[_u8]_x4", + "arguments": [ + "svcount_t png", + "uint8_t const * rn" + ], + "return_type": { + "value": "svuint8x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather[_u32base]_f32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather[_u32base]_index_f32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather[_u32base]_index_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather[_u32base]_index_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather[_u32base]_offset_f32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather[_u32base]_offset_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather[_u32base]_offset_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather[_u32base]_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather[_u32base]_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather[_u64base]_f64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather[_u64base]_index_f64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 8": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ], + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather[_u64base]_index_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 8": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ], + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather[_u64base]_index_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 8": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ], + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather[_u64base]_offset_f64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ], + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ], + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ], + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[s32]index[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base", + "svint32_t indices" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[s32]index[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svint32_t indices" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[s32]index[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svint32_t indices" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[s32]offset[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[s32]offset[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[s32]offset[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[s64]index[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[s64]index[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[s64]index[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[s64]offset[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[s64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[s64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[u32]index[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base", + "svuint32_t indices" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[u32]index[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svuint32_t indices" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[u32]index[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svuint32_t indices" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[u32]offset[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[u32]offset[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[u32]offset[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[u64]index[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[u64]index[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[u64]index[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[u64]offset[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[u64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_gather_[u64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_vnum[_f16]", + "arguments": [ + "svbool_t pg", + "const float16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_f16]_x2", + "arguments": [ + "svcount_t png", + "float16_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat16x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_f16]_x4", + "arguments": [ + "svcount_t png", + "float16_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat16x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_vnum[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_f32]_x2", + "arguments": [ + "svcount_t png", + "float32_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat32x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_f32]_x4", + "arguments": [ + "svcount_t png", + "float32_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat32x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_vnum[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ], + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_f64]_x2", + "arguments": [ + "svcount_t png", + "float64_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat64x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_f64]_x4", + "arguments": [ + "svcount_t png", + "float64_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat64x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_vnum[_s16]", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_s16]_x2", + "arguments": [ + "svcount_t png", + "int16_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svint16x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_s16]_x4", + "arguments": [ + "svcount_t png", + "int16_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svint16x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_vnum[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_s32]_x2", + "arguments": [ + "svcount_t png", + "int32_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svint32x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_s32]_x4", + "arguments": [ + "svcount_t png", + "int32_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svint32x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_vnum[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ], + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_s64]_x2", + "arguments": [ + "svcount_t png", + "int64_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svint64x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_s64]_x4", + "arguments": [ + "svcount_t png", + "int64_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svint64x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_vnum[_s8]", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_s8]_x2", + "arguments": [ + "svcount_t png", + "int8_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svint8x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MUL", + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_s8]_x4", + "arguments": [ + "svcount_t png", + "int8_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svint8x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MUL", + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_vnum[_u16]", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_u16]_x2", + "arguments": [ + "svcount_t png", + "uint16_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svuint16x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_u16]_x4", + "arguments": [ + "svcount_t png", + "uint16_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svuint16x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_vnum[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_u32]_x2", + "arguments": [ + "svcount_t png", + "uint32_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_u32]_x4", + "arguments": [ + "svcount_t png", + "uint32_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_vnum[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1D" + ], + [ + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_u64]_x2", + "arguments": [ + "svcount_t png", + "uint64_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_u64]_x4", + "arguments": [ + "svcount_t png", + "uint64_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LD1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1_vnum[_u8]", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_u8]_x2", + "arguments": [ + "svcount_t png", + "uint8_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svuint8x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MUL", + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svld1_vnum[_u8]_x4", + "arguments": [ + "svcount_t png", + "uint8_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svuint8x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MUL", + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ro[_f16]", + "arguments": [ + "svbool_t pg", + "const float16_t *base" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1ROH" + ], + [ + "LD1ROH" + ], + [ + "LD1ROH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ro[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1ROW" + ], + [ + "LD1ROW" + ], + [ + "LD1ROW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ro[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1ROD" + ], + [ + "LD1ROD" + ], + [ + "LD1ROD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ro[_s16]", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1ROH" + ], + [ + "LD1ROH" + ], + [ + "LD1ROH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ro[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1ROW" + ], + [ + "LD1ROW" + ], + [ + "LD1ROW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ro[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1ROD" + ], + [ + "LD1ROD" + ], + [ + "LD1ROD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ro[_s8]", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1ROB" + ], + [ + "LD1ROB" + ], + [ + "LD1ROB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ro[_u16]", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1ROH" + ], + [ + "LD1ROH" + ], + [ + "LD1ROH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ro[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1ROW" + ], + [ + "LD1ROW" + ], + [ + "LD1ROW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ro[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1ROD" + ], + [ + "LD1ROD" + ], + [ + "LD1ROD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ro[_u8]", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1ROB" + ], + [ + "LD1ROB" + ], + [ + "LD1ROB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1rq[_f16]", + "arguments": [ + "svbool_t pg", + "const float16_t *base" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1RQH" + ], + [ + "LD1RQH" + ], + [ + "LD1RQH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1rq[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1RQW" + ], + [ + "LD1RQW" + ], + [ + "LD1RQW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1rq[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1RQD" + ], + [ + "LD1RQD" + ], + [ + "LD1RQD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1rq[_s16]", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1RQH" + ], + [ + "LD1RQH" + ], + [ + "LD1RQH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1rq[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1RQW" + ], + [ + "LD1RQW" + ], + [ + "LD1RQW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1rq[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1RQD" + ], + [ + "LD1RQD" + ], + [ + "LD1RQD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1rq[_s8]", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1RQB" + ], + [ + "LD1RQB" + ], + [ + "LD1RQB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1rq[_u16]", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1RQH" + ], + [ + "LD1RQH" + ], + [ + "LD1RQH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1rq[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1RQW" + ], + [ + "LD1RQW" + ], + [ + "LD1RQW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1rq[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1RQD" + ], + [ + "LD1RQD" + ], + [ + "LD1RQD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1rq[_u8]", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1RQB" + ], + [ + "LD1RQB" + ], + [ + "LD1RQB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_gather[_u32base]_offset_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ], + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_gather[_u32base]_offset_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ], + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_gather[_u32base]_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_gather[_u32base]_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ], + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ], + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_gather_[s32]offset_s32", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_gather_[s32]offset_u32", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_gather_[s64]offset_s64", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_gather_[s64]offset_u64", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_gather_[u32]offset_s32", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_gather_[u32]offset_u32", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_gather_[u64]offset_s64", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_gather_[u64]offset_u64", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_s16", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ], + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_s32", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ], + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_s64", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ], + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_u16", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ], + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_u32", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ], + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_u64", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ], + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_vnum_s16", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ], + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_vnum_s32", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ], + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_vnum_s64", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ], + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_vnum_u16", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ], + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_vnum_u32", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ], + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sb_vnum_u64", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SB" + ], + [ + "LD1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather[_u32base]_index_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ], + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather[_u32base]_index_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ], + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather[_u32base]_offset_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ], + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather[_u32base]_offset_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ], + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather[_u32base]_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather[_u32base]_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather[_u64base]_index_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ], + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather[_u64base]_index_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ], + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ], + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ], + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather_[s32]index_s32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint32_t indices" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather_[s32]index_u32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint32_t indices" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather_[s32]offset_s32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather_[s32]offset_u32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather_[s64]index_s64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather_[s64]index_u64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather_[s64]offset_s64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather_[s64]offset_u64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather_[u32]index_s32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint32_t indices" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather_[u32]index_u32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint32_t indices" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather_[u32]offset_s32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather_[u32]offset_u32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather_[u64]index_s64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather_[u64]index_u64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather_[u64]offset_s64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_gather_[u64]offset_u64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_s32", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ], + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_s64", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ], + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_u32", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ], + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_u64", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ], + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_vnum_s32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ], + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_vnum_s64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ], + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_vnum_u32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ], + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sh_vnum_u64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SH" + ], + [ + "LD1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sw_gather[_u64base]_index_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SW" + ], + [ + "LD1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sw_gather[_u64base]_index_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SW" + ], + [ + "LD1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sw_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SW" + ], + [ + "LD1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sw_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SW" + ], + [ + "LD1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sw_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sw_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sw_gather_[s64]index_s64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sw_gather_[s64]index_u64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sw_gather_[s64]offset_s64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sw_gather_[s64]offset_u64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sw_gather_[u64]index_s64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sw_gather_[u64]index_u64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sw_gather_[u64]offset_s64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sw_gather_[u64]offset_u64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sw_s64", + "arguments": [ + "svbool_t pg", + "const int32_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SW" + ], + [ + "LD1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sw_u64", + "arguments": [ + "svbool_t pg", + "const int32_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SW" + ], + [ + "LD1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sw_vnum_s64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SW" + ], + [ + "LD1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1sw_vnum_u64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1SW" + ], + [ + "LD1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_gather[_u32base]_offset_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_gather[_u32base]_offset_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_gather[_u32base]_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_gather[_u32base]_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_gather_[s32]offset_s32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_gather_[s32]offset_u32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_gather_[s64]offset_s64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_gather_[s64]offset_u64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_gather_[u32]offset_s32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_gather_[u32]offset_u32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_gather_[u64]offset_s64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_gather_[u64]offset_u64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_s16", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_s32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_s64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_u16", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_u32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_u64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_vnum_s16", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_vnum_s32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_vnum_s64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_vnum_u16", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_vnum_u32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1ub_vnum_u64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1B" + ], + [ + "LD1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather[_u32base]_index_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather[_u32base]_index_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather[_u32base]_offset_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather[_u32base]_offset_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather[_u32base]_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather[_u32base]_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather[_u64base]_index_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather[_u64base]_index_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather_[s32]index_s32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint32_t indices" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather_[s32]index_u32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint32_t indices" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather_[s32]offset_s32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather_[s32]offset_u32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather_[s64]index_s64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather_[s64]index_u64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather_[s64]offset_s64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather_[s64]offset_u64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather_[u32]index_s32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint32_t indices" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather_[u32]index_u32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint32_t indices" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather_[u32]offset_s32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather_[u32]offset_u32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather_[u64]index_s64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather_[u64]index_u64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather_[u64]offset_s64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_gather_[u64]offset_u64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_s32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_s64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_u32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_u64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_vnum_s32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_vnum_s64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_vnum_u32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uh_vnum_u64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1H" + ], + [ + "LD1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uw_gather[_u64base]_index_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uw_gather[_u64base]_index_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uw_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uw_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uw_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uw_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uw_gather_[s64]index_s64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uw_gather_[s64]index_u64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uw_gather_[s64]offset_s64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uw_gather_[s64]offset_u64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uw_gather_[u64]index_s64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uw_gather_[u64]index_u64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uw_gather_[u64]offset_s64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uw_gather_[u64]offset_u64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uw_s64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uw_u64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uw_vnum_s64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld1uw_vnum_u64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD1W" + ], + [ + "LD1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2[_f16]", + "arguments": [ + "svbool_t pg", + "const float16_t *base" + ], + "return_type": { + "value": "svfloat16x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2H" + ], + [ + "LD2H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base" + ], + "return_type": { + "value": "svfloat32x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2W" + ], + [ + "LD2W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base" + ], + "return_type": { + "value": "svfloat64x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2D" + ], + [ + "LD2D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2[_s16]", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svint16x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2H" + ], + [ + "LD2H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base" + ], + "return_type": { + "value": "svint32x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2W" + ], + [ + "LD2W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base" + ], + "return_type": { + "value": "svint64x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2D" + ], + [ + "LD2D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2[_s8]", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svint8x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2B" + ], + [ + "LD2B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2[_u16]", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svuint16x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2H" + ], + [ + "LD2H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base" + ], + "return_type": { + "value": "svuint32x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2W" + ], + [ + "LD2W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base" + ], + "return_type": { + "value": "svuint64x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2D" + ], + [ + "LD2D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2[_u8]", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svuint8x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2B" + ], + [ + "LD2B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2_vnum[_f16]", + "arguments": [ + "svbool_t pg", + "const float16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat16x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2H" + ], + [ + "LD2H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2_vnum[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat32x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2W" + ], + [ + "LD2W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2_vnum[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat64x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2D" + ], + [ + "LD2D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2_vnum[_s16]", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint16x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2H" + ], + [ + "LD2H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2_vnum[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint32x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2W" + ], + [ + "LD2W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2_vnum[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2D" + ], + [ + "LD2D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2_vnum[_s8]", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint8x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2B" + ], + [ + "LD2B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2_vnum[_u16]", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint16x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2H" + ], + [ + "LD2H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2_vnum[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2W" + ], + [ + "LD2W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2_vnum[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2D" + ], + [ + "LD2D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld2_vnum[_u8]", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint8x2_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD2B" + ], + [ + "LD2B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3[_f16]", + "arguments": [ + "svbool_t pg", + "const float16_t *base" + ], + "return_type": { + "value": "svfloat16x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3H" + ], + [ + "LD3H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base" + ], + "return_type": { + "value": "svfloat32x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3W" + ], + [ + "LD3W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base" + ], + "return_type": { + "value": "svfloat64x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3D" + ], + [ + "LD3D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3[_s16]", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svint16x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3H" + ], + [ + "LD3H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base" + ], + "return_type": { + "value": "svint32x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3W" + ], + [ + "LD3W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base" + ], + "return_type": { + "value": "svint64x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3D" + ], + [ + "LD3D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3[_s8]", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svint8x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3B" + ], + [ + "LD3B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3[_u16]", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svuint16x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3H" + ], + [ + "LD3H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base" + ], + "return_type": { + "value": "svuint32x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3W" + ], + [ + "LD3W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base" + ], + "return_type": { + "value": "svuint64x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3D" + ], + [ + "LD3D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3[_u8]", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svuint8x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3B" + ], + [ + "LD3B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3_vnum[_f16]", + "arguments": [ + "svbool_t pg", + "const float16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat16x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3H" + ], + [ + "LD3H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3_vnum[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat32x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3W" + ], + [ + "LD3W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3_vnum[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat64x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3D" + ], + [ + "LD3D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3_vnum[_s16]", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint16x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3H" + ], + [ + "LD3H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3_vnum[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint32x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3W" + ], + [ + "LD3W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3_vnum[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3D" + ], + [ + "LD3D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3_vnum[_s8]", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint8x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3B" + ], + [ + "LD3B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3_vnum[_u16]", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint16x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3H" + ], + [ + "LD3H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3_vnum[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3W" + ], + [ + "LD3W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3_vnum[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3D" + ], + [ + "LD3D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld3_vnum[_u8]", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint8x3_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD3B" + ], + [ + "LD3B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4[_f16]", + "arguments": [ + "svbool_t pg", + "const float16_t *base" + ], + "return_type": { + "value": "svfloat16x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4H" + ], + [ + "LD4H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base" + ], + "return_type": { + "value": "svfloat32x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4W" + ], + [ + "LD4W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base" + ], + "return_type": { + "value": "svfloat64x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4D" + ], + [ + "LD4D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4[_s16]", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svint16x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4H" + ], + [ + "LD4H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base" + ], + "return_type": { + "value": "svint32x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4W" + ], + [ + "LD4W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base" + ], + "return_type": { + "value": "svint64x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4D" + ], + [ + "LD4D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4[_s8]", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svint8x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4B" + ], + [ + "LD4B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4[_u16]", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svuint16x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4H" + ], + [ + "LD4H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base" + ], + "return_type": { + "value": "svuint32x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4W" + ], + [ + "LD4W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base" + ], + "return_type": { + "value": "svuint64x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4D" + ], + [ + "LD4D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4[_u8]", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svuint8x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4B" + ], + [ + "LD4B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4_vnum[_f16]", + "arguments": [ + "svbool_t pg", + "const float16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat16x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4H" + ], + [ + "LD4H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4_vnum[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat32x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4W" + ], + [ + "LD4W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4_vnum[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat64x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4D" + ], + [ + "LD4D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4_vnum[_s16]", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint16x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4H" + ], + [ + "LD4H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4_vnum[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint32x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4W" + ], + [ + "LD4W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4_vnum[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4D" + ], + [ + "LD4D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4_vnum[_s8]", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint8x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4B" + ], + [ + "LD4B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4_vnum[_u16]", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint16x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4H" + ], + [ + "LD4H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4_vnum[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4W" + ], + [ + "LD4W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4_vnum[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4D" + ], + [ + "LD4D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svld4_vnum[_u8]", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint8x4_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LD4B" + ], + [ + "LD4B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1[_f16]", + "arguments": [ + "svbool_t pg", + "const float16_t *base" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ], + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ], + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ], + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1[_s16]", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ], + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ], + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ], + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1[_s8]", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ], + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1[_u16]", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ], + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ], + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ], + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1[_u8]", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ], + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather[_u32base]_f32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather[_u32base]_index_f32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ], + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather[_u32base]_index_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ], + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather[_u32base]_index_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ], + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather[_u32base]_offset_f32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ], + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather[_u32base]_offset_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ], + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather[_u32base]_offset_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ], + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather[_u32base]_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather[_u32base]_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather[_u64base]_f64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather[_u64base]_index_f64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 8": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ], + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather[_u64base]_index_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 8": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ], + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather[_u64base]_index_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 8": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ], + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather[_u64base]_offset_f64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ], + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ], + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ], + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[s32]index[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base", + "svint32_t indices" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[s32]index[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svint32_t indices" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[s32]index[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svint32_t indices" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[s32]offset[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[s32]offset[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[s32]offset[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[s64]index[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[s64]index[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[s64]index[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[s64]offset[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[s64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[s64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[u32]index[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base", + "svuint32_t indices" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[u32]index[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svuint32_t indices" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[u32]index[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svuint32_t indices" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[u32]offset[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[u32]offset[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[u32]offset[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[u64]index[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[u64]index[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[u64]index[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[u64]offset[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[u64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_gather_[u64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_vnum[_f16]", + "arguments": [ + "svbool_t pg", + "const float16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_vnum[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_vnum[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_vnum[_s16]", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_vnum[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_vnum[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_vnum[_s8]", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_vnum[_u16]", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_vnum[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_vnum[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1_vnum[_u8]", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_gather[_u32base]_offset_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ], + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_gather[_u32base]_offset_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ], + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_gather[_u32base]_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_gather[_u32base]_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ], + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ], + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_gather_[s32]offset_s32", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_gather_[s32]offset_u32", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_gather_[s64]offset_s64", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_gather_[s64]offset_u64", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_gather_[u32]offset_s32", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_gather_[u32]offset_u32", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_gather_[u64]offset_s64", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_gather_[u64]offset_u64", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_s16", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ], + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_s32", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ], + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_s64", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ], + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_u16", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ], + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_u32", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ], + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_u64", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ], + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_vnum_s16", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_vnum_s32", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_vnum_s64", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_vnum_u16", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_vnum_u32", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sb_vnum_u64", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather[_u32base]_index_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ], + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather[_u32base]_index_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ], + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather[_u32base]_offset_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ], + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather[_u32base]_offset_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ], + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather[_u32base]_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather[_u32base]_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather[_u64base]_index_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ], + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather[_u64base]_index_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ], + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ], + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ], + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather_[s32]index_s32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint32_t indices" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather_[s32]index_u32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint32_t indices" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather_[s32]offset_s32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather_[s32]offset_u32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather_[s64]index_s64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather_[s64]index_u64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather_[s64]offset_s64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather_[s64]offset_u64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather_[u32]index_s32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint32_t indices" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather_[u32]index_u32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint32_t indices" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather_[u32]offset_s32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather_[u32]offset_u32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather_[u64]index_s64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather_[u64]index_u64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather_[u64]offset_s64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_gather_[u64]offset_u64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_s32", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ], + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_s64", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ], + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_u32", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ], + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_u64", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ], + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_vnum_s32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_vnum_s64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_vnum_u32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sh_vnum_u64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sw_gather[_u64base]_index_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SW" + ], + [ + "LDFF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sw_gather[_u64base]_index_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SW" + ], + [ + "LDFF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sw_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SW" + ], + [ + "LDFF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sw_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SW" + ], + [ + "LDFF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sw_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sw_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sw_gather_[s64]index_s64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sw_gather_[s64]index_u64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sw_gather_[s64]offset_s64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sw_gather_[s64]offset_u64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sw_gather_[u64]index_s64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sw_gather_[u64]index_u64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sw_gather_[u64]offset_s64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sw_gather_[u64]offset_u64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sw_s64", + "arguments": [ + "svbool_t pg", + "const int32_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SW" + ], + [ + "LDFF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sw_u64", + "arguments": [ + "svbool_t pg", + "const int32_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SW" + ], + [ + "LDFF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sw_vnum_s64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1sw_vnum_u64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_gather[_u32base]_offset_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ], + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_gather[_u32base]_offset_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ], + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_gather[_u32base]_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_gather[_u32base]_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ], + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ], + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_gather_[s32]offset_s32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_gather_[s32]offset_u32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_gather_[s64]offset_s64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_gather_[s64]offset_u64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_gather_[u32]offset_s32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_gather_[u32]offset_u32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_gather_[u64]offset_s64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_gather_[u64]offset_u64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_s16", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ], + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_s32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ], + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_s64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ], + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_u16", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ], + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_u32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ], + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_u64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ], + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_vnum_s16", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_vnum_s32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_vnum_s64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_vnum_u16", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_vnum_u32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1ub_vnum_u64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather[_u32base]_index_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ], + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather[_u32base]_index_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ], + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather[_u32base]_offset_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ], + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather[_u32base]_offset_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ], + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather[_u32base]_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather[_u32base]_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather[_u64base]_index_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ], + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather[_u64base]_index_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ], + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ], + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ], + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather_[s32]index_s32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint32_t indices" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather_[s32]index_u32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint32_t indices" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather_[s32]offset_s32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather_[s32]offset_u32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather_[s64]index_s64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather_[s64]index_u64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather_[s64]offset_s64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather_[s64]offset_u64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather_[u32]index_s32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint32_t indices" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather_[u32]index_u32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint32_t indices" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather_[u32]offset_s32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather_[u32]offset_u32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather_[u64]index_s64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather_[u64]index_u64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather_[u64]offset_s64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_gather_[u64]offset_u64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_s32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ], + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_s64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ], + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_u32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ], + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_u64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ], + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_vnum_s32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_vnum_s64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_vnum_u32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uh_vnum_u64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uw_gather[_u64base]_index_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ], + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uw_gather[_u64base]_index_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ], + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uw_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ], + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uw_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ], + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uw_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uw_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uw_gather_[s64]index_s64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uw_gather_[s64]index_u64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uw_gather_[s64]offset_s64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uw_gather_[s64]offset_u64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uw_gather_[u64]index_s64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uw_gather_[u64]index_u64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uw_gather_[u64]offset_s64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uw_gather_[u64]offset_u64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uw_s64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ], + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uw_u64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ], + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uw_vnum_s64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldff1uw_vnum_u64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDFF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1[_f16]", + "arguments": [ + "svbool_t pg", + "const float16_t *base" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1[_s16]", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1[_s8]", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1[_u16]", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1[_u8]", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1_vnum[_f16]", + "arguments": [ + "svbool_t pg", + "const float16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcnth() * 2": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1H" + ], + [ + "LDNF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1_vnum[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntw() * 4": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1W" + ], + [ + "LDNF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1_vnum[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntd() * 8": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1D" + ], + [ + "LDNF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1_vnum[_s16]", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcnth() * 2": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1H" + ], + [ + "LDNF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1_vnum[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntw() * 4": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1W" + ], + [ + "LDNF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1_vnum[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntd() * 8": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1D" + ], + [ + "LDNF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1_vnum[_s8]", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntb()": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1B" + ], + [ + "LDNF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1_vnum[_u16]", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcnth() * 2": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1H" + ], + [ + "LDNF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1_vnum[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntw() * 4": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1W" + ], + [ + "LDNF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1_vnum[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntd() * 8": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1D" + ], + [ + "LDNF1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1_vnum[_u8]", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntb()": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1B" + ], + [ + "LDNF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sb_s16", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sb_s32", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sb_s64", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sb_u16", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sb_u32", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sb_u64", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sb_vnum_s16", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcnth()": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SB" + ], + [ + "LDNF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sb_vnum_s32", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntw()": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SB" + ], + [ + "LDNF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sb_vnum_s64", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntd()": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SB" + ], + [ + "LDNF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sb_vnum_u16", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcnth()": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SB" + ], + [ + "LDNF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sb_vnum_u32", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntw()": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SB" + ], + [ + "LDNF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sb_vnum_u64", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntd()": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SB" + ], + [ + "LDNF1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sh_s32", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sh_s64", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sh_u32", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sh_u64", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sh_vnum_s32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntw() * 2": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SH" + ], + [ + "LDNF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sh_vnum_s64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntd() * 2": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SH" + ], + [ + "LDNF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sh_vnum_u32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntw() * 2": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SH" + ], + [ + "LDNF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sh_vnum_u64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntd() * 2": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SH" + ], + [ + "LDNF1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sw_s64", + "arguments": [ + "svbool_t pg", + "const int32_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sw_u64", + "arguments": [ + "svbool_t pg", + "const int32_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sw_vnum_s64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntd() * 4": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SW" + ], + [ + "LDNF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1sw_vnum_u64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntd() * 4": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1SW" + ], + [ + "LDNF1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1ub_s16", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1ub_s32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1ub_s64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1ub_u16", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1ub_u32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1ub_u64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1ub_vnum_s16", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcnth()": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1B" + ], + [ + "LDNF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1ub_vnum_s32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntw()": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1B" + ], + [ + "LDNF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1ub_vnum_s64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntd()": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1B" + ], + [ + "LDNF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1ub_vnum_u16", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcnth()": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1B" + ], + [ + "LDNF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1ub_vnum_u32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntw()": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1B" + ], + [ + "LDNF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1ub_vnum_u64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntd()": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1B" + ], + [ + "LDNF1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1uh_s32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1uh_s64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1uh_u32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1uh_u64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1uh_vnum_s32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntw() * 2": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1H" + ], + [ + "LDNF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1uh_vnum_s64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntd() * 2": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1H" + ], + [ + "LDNF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1uh_vnum_u32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntw() * 2": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1H" + ], + [ + "LDNF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1uh_vnum_u64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntd() * 2": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1H" + ], + [ + "LDNF1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1uw_s64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1uw_u64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1uw_vnum_s64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntd() * 4": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1W" + ], + [ + "LDNF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnf1uw_vnum_u64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "base + vnum * svcntd() * 4": { + "register": "Xptr" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNF1W" + ], + [ + "LDNF1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1[_f16]", + "arguments": [ + "svbool_t pg", + "const float16_t *base" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ], + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_f16]_x2", + "arguments": [ + "svcount_t png", + "float16_t const * rn" + ], + "return_type": { + "value": "svfloat16x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_f16]_x4", + "arguments": [ + "svcount_t png", + "float16_t const * rn" + ], + "return_type": { + "value": "svfloat16x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ], + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_f32]_x2", + "arguments": [ + "svcount_t png", + "float32_t const * rn" + ], + "return_type": { + "value": "svfloat32x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_f32]_x4", + "arguments": [ + "svcount_t png", + "float32_t const * rn" + ], + "return_type": { + "value": "svfloat32x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ], + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_f64]_x2", + "arguments": [ + "svcount_t png", + "float64_t const * rn" + ], + "return_type": { + "value": "svfloat64x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_f64]_x4", + "arguments": [ + "svcount_t png", + "float64_t const * rn" + ], + "return_type": { + "value": "svfloat64x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1[_s16]", + "arguments": [ + "svbool_t pg", + "const int16_t *base" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ], + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_s16]_x2", + "arguments": [ + "svcount_t png", + "int16_t const * rn" + ], + "return_type": { + "value": "svint16x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_s16]_x4", + "arguments": [ + "svcount_t png", + "int16_t const * rn" + ], + "return_type": { + "value": "svint16x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ], + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_s32]_x2", + "arguments": [ + "svcount_t png", + "int32_t const * rn" + ], + "return_type": { + "value": "svint32x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_s32]_x4", + "arguments": [ + "svcount_t png", + "int32_t const * rn" + ], + "return_type": { + "value": "svint32x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ], + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_s64]_x2", + "arguments": [ + "svcount_t png", + "int64_t const * rn" + ], + "return_type": { + "value": "svint64x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_s64]_x4", + "arguments": [ + "svcount_t png", + "int64_t const * rn" + ], + "return_type": { + "value": "svint64x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1[_s8]", + "arguments": [ + "svbool_t pg", + "const int8_t *base" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ], + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_s8]_x2", + "arguments": [ + "svcount_t png", + "int8_t const * rn" + ], + "return_type": { + "value": "svint8x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_s8]_x4", + "arguments": [ + "svcount_t png", + "int8_t const * rn" + ], + "return_type": { + "value": "svint8x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1[_u16]", + "arguments": [ + "svbool_t pg", + "const uint16_t *base" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ], + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_u16]_x2", + "arguments": [ + "svcount_t png", + "uint16_t const * rn" + ], + "return_type": { + "value": "svuint16x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_u16]_x4", + "arguments": [ + "svcount_t png", + "uint16_t const * rn" + ], + "return_type": { + "value": "svuint16x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ], + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_u32]_x2", + "arguments": [ + "svcount_t png", + "uint32_t const * rn" + ], + "return_type": { + "value": "svuint32x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_u32]_x4", + "arguments": [ + "svcount_t png", + "uint32_t const * rn" + ], + "return_type": { + "value": "svuint32x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ], + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_u64]_x2", + "arguments": [ + "svcount_t png", + "uint64_t const * rn" + ], + "return_type": { + "value": "svuint64x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_u64]_x4", + "arguments": [ + "svcount_t png", + "uint64_t const * rn" + ], + "return_type": { + "value": "svuint64x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1[_u8]", + "arguments": [ + "svbool_t pg", + "const uint8_t *base" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ], + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_u8]_x2", + "arguments": [ + "svcount_t png", + "uint8_t const * rn" + ], + "return_type": { + "value": "svuint8x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1[_u8]_x4", + "arguments": [ + "svcount_t png", + "uint8_t const * rn" + ], + "return_type": { + "value": "svuint8x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather[_u32base]_f32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather[_u32base]_index_f32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather[_u32base]_index_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather[_u32base]_index_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather[_u32base]_offset_f32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather[_u32base]_offset_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather[_u32base]_offset_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather[_u32base]_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather[_u32base]_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather[_u64base]_f64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather[_u64base]_index_f64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 8": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather[_u64base]_index_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 8": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather[_u64base]_index_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 8": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather[_u64base]_offset_f64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather_[s64]index[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 8": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather_[s64]index[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 8": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather_[s64]index[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 8": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather_[s64]offset[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather_[s64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather_[s64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather_[u32]offset[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather_[u32]offset[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather_[u32]offset[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather_[u64]index[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 8": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather_[u64]index[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 8": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather_[u64]index[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 8": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather_[u64]offset[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather_[u64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_gather_[u64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1_vnum[_f16]", + "arguments": [ + "svbool_t pg", + "const float16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ], + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_f16]_x2", + "arguments": [ + "svcount_t png", + "float16_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat16x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_f16]_x4", + "arguments": [ + "svcount_t png", + "float16_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat16x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1_vnum[_f32]", + "arguments": [ + "svbool_t pg", + "const float32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ], + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_f32]_x2", + "arguments": [ + "svcount_t png", + "float32_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat32x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_f32]_x4", + "arguments": [ + "svcount_t png", + "float32_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat32x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1_vnum[_f64]", + "arguments": [ + "svbool_t pg", + "const float64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ], + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_f64]_x2", + "arguments": [ + "svcount_t png", + "float64_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat64x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_f64]_x4", + "arguments": [ + "svcount_t png", + "float64_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svfloat64x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1_vnum[_s16]", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ], + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_s16]_x2", + "arguments": [ + "svcount_t png", + "int16_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svint16x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_s16]_x4", + "arguments": [ + "svcount_t png", + "int16_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svint16x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1_vnum[_s32]", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ], + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_s32]_x2", + "arguments": [ + "svcount_t png", + "int32_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svint32x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_s32]_x4", + "arguments": [ + "svcount_t png", + "int32_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svint32x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1_vnum[_s64]", + "arguments": [ + "svbool_t pg", + "const int64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ], + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_s64]_x2", + "arguments": [ + "svcount_t png", + "int64_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svint64x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_s64]_x4", + "arguments": [ + "svcount_t png", + "int64_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svint64x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1_vnum[_s8]", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ], + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_s8]_x2", + "arguments": [ + "svcount_t png", + "int8_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svint8x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MUL", + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_s8]_x4", + "arguments": [ + "svcount_t png", + "int8_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svint8x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MUL", + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1_vnum[_u16]", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ], + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_u16]_x2", + "arguments": [ + "svcount_t png", + "uint16_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svuint16x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_u16]_x4", + "arguments": [ + "svcount_t png", + "uint16_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svuint16x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1_vnum[_u32]", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ], + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_u32]_x2", + "arguments": [ + "svcount_t png", + "uint32_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_u32]_x4", + "arguments": [ + "svcount_t png", + "uint32_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svuint32x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1_vnum[_u64]", + "arguments": [ + "svbool_t pg", + "const uint64_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1D" + ], + [ + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_u64]_x2", + "arguments": [ + "svcount_t png", + "uint64_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_u64]_x4", + "arguments": [ + "svcount_t png", + "uint64_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svuint64x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "LDNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svldnt1_vnum[_u8]", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "int64_t vnum" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ], + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_u8]_x2", + "arguments": [ + "svcount_t png", + "uint8_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svuint8x2_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MUL", + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1_vnum[_u8]_x4", + "arguments": [ + "svcount_t png", + "uint8_t const * rn", + "int64_t vnum" + ], + "return_type": { + "value": "svuint8x4_t" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "vnum": { + "register": "Xreg3" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MUL", + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sb_gather[_u32base]_offset_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sb_gather[_u32base]_offset_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sb_gather[_u32base]_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sb_gather[_u32base]_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sb_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sb_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sb_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sb_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sb_gather_[s64]offset_s64", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sb_gather_[s64]offset_u64", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sb_gather_[u32]offset_s32", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sb_gather_[u32]offset_u32", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sb_gather_[u64]offset_s64", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sb_gather_[u64]offset_u64", + "arguments": [ + "svbool_t pg", + "const int8_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather[_u32base]_index_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather[_u32base]_index_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather[_u32base]_offset_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather[_u32base]_offset_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather[_u32base]_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather[_u32base]_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather[_u64base]_index_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather[_u64base]_index_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather_[s64]index_s64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 2": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather_[s64]index_u64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 2": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather_[s64]offset_s64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather_[s64]offset_u64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather_[u32]offset_s32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather_[u32]offset_u32", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather_[u64]index_s64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 2": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather_[u64]index_u64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 2": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather_[u64]offset_s64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sh_gather_[u64]offset_u64", + "arguments": [ + "svbool_t pg", + "const int16_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sw_gather[_u64base]_index_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sw_gather[_u64base]_index_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sw_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sw_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sw_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sw_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sw_gather_[s64]index_s64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 4": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sw_gather_[s64]index_u64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 4": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sw_gather_[s64]offset_s64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sw_gather_[s64]offset_u64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sw_gather_[u64]index_s64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 4": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sw_gather_[u64]index_u64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 4": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sw_gather_[u64]offset_s64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1sw_gather_[u64]offset_u64", + "arguments": [ + "svbool_t pg", + "const int32_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1SW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1ub_gather[_u32base]_offset_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1ub_gather[_u32base]_offset_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1ub_gather[_u32base]_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1ub_gather[_u32base]_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1ub_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1ub_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1ub_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1ub_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1ub_gather_[s64]offset_s64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1ub_gather_[s64]offset_u64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1ub_gather_[u32]offset_s32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1ub_gather_[u32]offset_u32", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1ub_gather_[u64]offset_s64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1ub_gather_[u64]offset_u64", + "arguments": [ + "svbool_t pg", + "const uint8_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather[_u32base]_index_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather[_u32base]_index_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather[_u32base]_offset_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather[_u32base]_offset_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather[_u32base]_s32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather[_u32base]_u32", + "arguments": [ + "svbool_t pg", + "svuint32_t bases" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather[_u64base]_index_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather[_u64base]_index_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather_[s64]index_s64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 2": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather_[s64]index_u64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 2": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather_[s64]offset_s64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather_[s64]offset_u64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather_[u32]offset_s32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather_[u32]offset_u32", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint32_t offsets" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather_[u64]index_s64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 2": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather_[u64]index_u64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 2": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather_[u64]offset_s64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uh_gather_[u64]offset_u64", + "arguments": [ + "svbool_t pg", + "const uint16_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uw_gather[_u64base]_index_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uw_gather[_u64base]_index_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uw_gather[_u64base]_offset_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uw_gather[_u64base]_offset_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uw_gather[_u64base]_s64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uw_gather[_u64base]_u64", + "arguments": [ + "svbool_t pg", + "svuint64_t bases" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uw_gather_[s64]index_s64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 4": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uw_gather_[s64]index_u64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 4": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uw_gather_[s64]offset_s64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uw_gather_[s64]offset_u64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uw_gather_[u64]index_s64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 4": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uw_gather_[u64]index_u64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices * 4": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uw_gather_[u64]offset_s64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svldnt1uw_gather_[u64]offset_u64", + "arguments": [ + "svbool_t pg", + "const uint32_t *base", + "svuint64_t offsets" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlen[_f16]", + "arguments": [ + "svfloat16_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlen[_f32]", + "arguments": [ + "svfloat32_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlen[_f64]", + "arguments": [ + "svfloat64_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlen[_s16]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlen[_s32]", + "arguments": [ + "svint32_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlen[_s64]", + "arguments": [ + "svint64_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlen[_s8]", + "arguments": [ + "svint8_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlen[_u16]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlen[_u32]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlen[_u64]", + "arguments": [ + "svuint64_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlen[_u8]", + "arguments": [ + "svuint8_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "CNTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svlogb[_f16]_m", + "arguments": [ + "svint16_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FLOGB" + ], + [ + "MOVPRFX", + "FLOGB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svlogb[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FLOGB" + ], + [ + "MOVPRFX", + "FLOGB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svlogb[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FLOGB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svlogb[_f32]_m", + "arguments": [ + "svint32_t inactive", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FLOGB" + ], + [ + "MOVPRFX", + "FLOGB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svlogb[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FLOGB" + ], + [ + "MOVPRFX", + "FLOGB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svlogb[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FLOGB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svlogb[_f64]_m", + "arguments": [ + "svint64_t inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FLOGB" + ], + [ + "MOVPRFX", + "FLOGB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svlogb[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FLOGB" + ], + [ + "MOVPRFX", + "FLOGB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svlogb[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FLOGB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ], + [ + "LSLR" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSLR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ], + [ + "LSLR" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSLR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ], + [ + "LSLR" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSLR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ], + [ + "LSLR" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSLR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ], + [ + "LSLR" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSLR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ], + [ + "LSLR" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSLR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ], + [ + "LSLR" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSLR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ], + [ + "LSLR" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSLR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSLR" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSLR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSLR" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSLR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSLR" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSLR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSLR" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSLR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSLR" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSLR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSLR" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSLR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSLR" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSLR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSLR" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSLR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsl_wide[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSR" + ], + [ + "LSRR" + ], + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "LSRR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSR" + ], + [ + "LSRR" + ], + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "LSRR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSR" + ], + [ + "LSRR" + ], + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "LSRR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSR" + ], + [ + "LSRR" + ], + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "LSRR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSRR" + ], + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "LSRR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSRR" + ], + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "LSRR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSRR" + ], + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "LSRR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSRR" + ], + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "LSRR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr_wide[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr_wide[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSR" + ], + [ + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr_wide[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr_wide[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr_wide[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSR" + ], + [ + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr_wide[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr_wide[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr_wide[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSR" + ], + [ + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr_wide[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr_wide[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr_wide[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr_wide[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr_wide[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr_wide[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr_wide[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr_wide[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr_wide[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSR" + ], + [ + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svlsr_wide[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAD" + ], + [ + "MOVPRFX", + "FMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H|Ztied3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAD" + ], + [ + "FMAD" + ], + [ + "FMLA" + ], + [ + "MOVPRFX", + "FMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMAD" + ], + [ + "MOVPRFX", + "FMAD" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAD" + ], + [ + "MOVPRFX", + "FMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S|Ztied3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAD" + ], + [ + "FMAD" + ], + [ + "FMLA" + ], + [ + "MOVPRFX", + "FMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMAD" + ], + [ + "MOVPRFX", + "FMAD" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAD" + ], + [ + "MOVPRFX", + "FMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D|Ztied3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAD" + ], + [ + "FMAD" + ], + [ + "FMLA" + ], + [ + "MOVPRFX", + "FMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMAD" + ], + [ + "MOVPRFX", + "FMAD" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAD" + ], + [ + "MOVPRFX", + "FMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H[*]|Ztied3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAD" + ], + [ + "FMAD" + ], + [ + "FMLA" + ], + [ + "MOVPRFX", + "FMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMAD" + ], + [ + "MOVPRFX", + "FMAD" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAD" + ], + [ + "MOVPRFX", + "FMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S[*]|Ztied3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAD" + ], + [ + "FMAD" + ], + [ + "FMLA" + ], + [ + "MOVPRFX", + "FMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMAD" + ], + [ + "MOVPRFX", + "FMAD" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAD" + ], + [ + "MOVPRFX", + "FMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D[*]|Ztied3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAD" + ], + [ + "FMAD" + ], + [ + "FMLA" + ], + [ + "MOVPRFX", + "FMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMAD" + ], + [ + "MOVPRFX", + "FMAD" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H[*]|Ztied3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S[*]|Ztied3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "int64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "int64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D[*]|Ztied3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "int64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B[*]|Ztied3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H[*]|Ztied3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S[*]|Ztied3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D[*]|Ztied3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B[*]|Ztied3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H|Ztied3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S|Ztied3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D|Ztied3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B|Ztied3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H|Ztied3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S|Ztied3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D|Ztied3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B|Ztied3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmad[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmatch[_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MATCH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmatch[_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MATCH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmatch[_u16]", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MATCH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmatch[_u8]", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MATCH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAX" + ], + [ + "MOVPRFX", + "FMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAX" + ], + [ + "FMAX" + ], + [ + "MOVPRFX", + "FMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMAX" + ], + [ + "MOVPRFX", + "FMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAX" + ], + [ + "MOVPRFX", + "FMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAX" + ], + [ + "FMAX" + ], + [ + "MOVPRFX", + "FMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMAX" + ], + [ + "MOVPRFX", + "FMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAX" + ], + [ + "MOVPRFX", + "FMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAX" + ], + [ + "FMAX" + ], + [ + "MOVPRFX", + "FMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMAX" + ], + [ + "MOVPRFX", + "FMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAX" + ], + [ + "FMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAX" + ], + [ + "FMAX" + ], + [ + "FMAX" + ], + [ + "MOVPRFX", + "FMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMAX" + ], + [ + "MOVPRFX", + "FMAX" + ], + [ + "MOVPRFX", + "FMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAX" + ], + [ + "FMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAX" + ], + [ + "FMAX" + ], + [ + "FMAX" + ], + [ + "MOVPRFX", + "FMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMAX" + ], + [ + "MOVPRFX", + "FMAX" + ], + [ + "MOVPRFX", + "FMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAX" + ], + [ + "FMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAX" + ], + [ + "FMAX" + ], + [ + "FMAX" + ], + [ + "MOVPRFX", + "FMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMAX" + ], + [ + "MOVPRFX", + "FMAX" + ], + [ + "MOVPRFX", + "FMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAX" + ], + [ + "SMAX" + ], + [ + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAX" + ], + [ + "SMAX" + ], + [ + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAX" + ], + [ + "SMAX" + ], + [ + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAX" + ], + [ + "SMAX" + ], + [ + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAX" + ], + [ + "UMAX" + ], + [ + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAX" + ], + [ + "UMAX" + ], + [ + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAX" + ], + [ + "UMAX" + ], + [ + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAX" + ], + [ + "UMAX" + ], + [ + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAX" + ], + [ + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAX" + ], + [ + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAX" + ], + [ + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAX" + ], + [ + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMAX" + ], + [ + "MOVPRFX", + "SMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAX" + ], + [ + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAX" + ], + [ + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAX" + ], + [ + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAX" + ], + [ + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmax[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMAX" + ], + [ + "MOVPRFX", + "UMAX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnm[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNM" + ], + [ + "MOVPRFX", + "FMAXNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnm[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNM" + ], + [ + "FMAXNM" + ], + [ + "MOVPRFX", + "FMAXNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnm[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMAXNM" + ], + [ + "MOVPRFX", + "FMAXNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnm[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNM" + ], + [ + "MOVPRFX", + "FMAXNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnm[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNM" + ], + [ + "FMAXNM" + ], + [ + "MOVPRFX", + "FMAXNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnm[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMAXNM" + ], + [ + "MOVPRFX", + "FMAXNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnm[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNM" + ], + [ + "MOVPRFX", + "FMAXNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnm[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNM" + ], + [ + "FMAXNM" + ], + [ + "MOVPRFX", + "FMAXNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnm[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMAXNM" + ], + [ + "MOVPRFX", + "FMAXNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnm[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNM" + ], + [ + "FMAXNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnm[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNM" + ], + [ + "FMAXNM" + ], + [ + "FMAXNM" + ], + [ + "MOVPRFX", + "FMAXNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnm[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMAXNM" + ], + [ + "MOVPRFX", + "FMAXNM" + ], + [ + "MOVPRFX", + "FMAXNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnm[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNM" + ], + [ + "FMAXNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnm[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNM" + ], + [ + "FMAXNM" + ], + [ + "FMAXNM" + ], + [ + "MOVPRFX", + "FMAXNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnm[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMAXNM" + ], + [ + "MOVPRFX", + "FMAXNM" + ], + [ + "MOVPRFX", + "FMAXNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnm[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNM" + ], + [ + "FMAXNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnm[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNM" + ], + [ + "FMAXNM" + ], + [ + "FMAXNM" + ], + [ + "MOVPRFX", + "FMAXNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnm[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMAXNM" + ], + [ + "MOVPRFX", + "FMAXNM" + ], + [ + "MOVPRFX", + "FMAXNM" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxnmp[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNMP" + ], + [ + "MOVPRFX", + "FMAXNMP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxnmp[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNMP" + ], + [ + "MOVPRFX", + "FMAXNMP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxnmp[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNMP" + ], + [ + "MOVPRFX", + "FMAXNMP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxnmp[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNMP" + ], + [ + "MOVPRFX", + "FMAXNMP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxnmp[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNMP" + ], + [ + "MOVPRFX", + "FMAXNMP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxnmp[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNMP" + ], + [ + "MOVPRFX", + "FMAXNMP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnmv[_f16]", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNMV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnmv[_f32]", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "float32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNMV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxnmv[_f64]", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "float64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXNMV" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXP" + ], + [ + "MOVPRFX", + "FMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXP" + ], + [ + "MOVPRFX", + "FMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXP" + ], + [ + "MOVPRFX", + "FMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXP" + ], + [ + "MOVPRFX", + "FMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXP" + ], + [ + "MOVPRFX", + "FMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXP" + ], + [ + "MOVPRFX", + "FMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAXP" + ], + [ + "MOVPRFX", + "SMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAXP" + ], + [ + "MOVPRFX", + "SMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAXP" + ], + [ + "MOVPRFX", + "SMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAXP" + ], + [ + "MOVPRFX", + "SMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAXP" + ], + [ + "MOVPRFX", + "SMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAXP" + ], + [ + "MOVPRFX", + "SMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAXP" + ], + [ + "MOVPRFX", + "SMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAXP" + ], + [ + "MOVPRFX", + "SMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAXP" + ], + [ + "MOVPRFX", + "UMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAXP" + ], + [ + "MOVPRFX", + "UMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAXP" + ], + [ + "MOVPRFX", + "UMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAXP" + ], + [ + "MOVPRFX", + "UMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAXP" + ], + [ + "MOVPRFX", + "UMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAXP" + ], + [ + "MOVPRFX", + "UMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAXP" + ], + [ + "MOVPRFX", + "UMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmaxp[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAXP" + ], + [ + "MOVPRFX", + "UMAXP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxv[_f16]", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxv[_f32]", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "float32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxv[_f64]", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "float64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMAXV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxv[_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "int16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAXV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxv[_s32]", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAXV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxv[_s64]", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAXV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxv[_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "int8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMAXV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxv[_u16]", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAXV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxv[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAXV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxv[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAXV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmaxv[_u8]", + "arguments": [ + "svbool_t pg", + "svuint8_t op" + ], + "return_type": { + "value": "uint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMAXV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMIN" + ], + [ + "MOVPRFX", + "FMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMIN" + ], + [ + "FMIN" + ], + [ + "MOVPRFX", + "FMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMIN" + ], + [ + "MOVPRFX", + "FMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMIN" + ], + [ + "MOVPRFX", + "FMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMIN" + ], + [ + "FMIN" + ], + [ + "MOVPRFX", + "FMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMIN" + ], + [ + "MOVPRFX", + "FMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMIN" + ], + [ + "MOVPRFX", + "FMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMIN" + ], + [ + "FMIN" + ], + [ + "MOVPRFX", + "FMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMIN" + ], + [ + "MOVPRFX", + "FMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMIN" + ], + [ + "FMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMIN" + ], + [ + "FMIN" + ], + [ + "FMIN" + ], + [ + "MOVPRFX", + "FMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMIN" + ], + [ + "MOVPRFX", + "FMIN" + ], + [ + "MOVPRFX", + "FMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMIN" + ], + [ + "FMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMIN" + ], + [ + "FMIN" + ], + [ + "FMIN" + ], + [ + "MOVPRFX", + "FMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMIN" + ], + [ + "MOVPRFX", + "FMIN" + ], + [ + "MOVPRFX", + "FMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMIN" + ], + [ + "FMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMIN" + ], + [ + "FMIN" + ], + [ + "FMIN" + ], + [ + "MOVPRFX", + "FMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMIN" + ], + [ + "MOVPRFX", + "FMIN" + ], + [ + "MOVPRFX", + "FMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMIN" + ], + [ + "SMIN" + ], + [ + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMIN" + ], + [ + "SMIN" + ], + [ + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMIN" + ], + [ + "SMIN" + ], + [ + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMIN" + ], + [ + "SMIN" + ], + [ + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMIN" + ], + [ + "UMIN" + ], + [ + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMIN" + ], + [ + "UMIN" + ], + [ + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMIN" + ], + [ + "UMIN" + ], + [ + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMIN" + ], + [ + "UMIN" + ], + [ + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMIN" + ], + [ + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMIN" + ], + [ + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMIN" + ], + [ + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMIN" + ], + [ + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMIN" + ], + [ + "MOVPRFX", + "SMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMIN" + ], + [ + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMIN" + ], + [ + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMIN" + ], + [ + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMIN" + ], + [ + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmin[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMIN" + ], + [ + "MOVPRFX", + "UMIN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnm[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNM" + ], + [ + "MOVPRFX", + "FMINNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnm[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNM" + ], + [ + "FMINNM" + ], + [ + "MOVPRFX", + "FMINNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnm[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMINNM" + ], + [ + "MOVPRFX", + "FMINNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnm[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNM" + ], + [ + "MOVPRFX", + "FMINNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnm[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNM" + ], + [ + "FMINNM" + ], + [ + "MOVPRFX", + "FMINNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnm[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMINNM" + ], + [ + "MOVPRFX", + "FMINNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnm[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNM" + ], + [ + "MOVPRFX", + "FMINNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnm[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNM" + ], + [ + "FMINNM" + ], + [ + "MOVPRFX", + "FMINNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnm[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMINNM" + ], + [ + "MOVPRFX", + "FMINNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnm[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNM" + ], + [ + "FMINNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnm[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNM" + ], + [ + "FMINNM" + ], + [ + "FMINNM" + ], + [ + "MOVPRFX", + "FMINNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnm[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMINNM" + ], + [ + "MOVPRFX", + "FMINNM" + ], + [ + "MOVPRFX", + "FMINNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnm[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNM" + ], + [ + "FMINNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnm[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNM" + ], + [ + "FMINNM" + ], + [ + "FMINNM" + ], + [ + "MOVPRFX", + "FMINNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnm[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMINNM" + ], + [ + "MOVPRFX", + "FMINNM" + ], + [ + "MOVPRFX", + "FMINNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnm[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNM" + ], + [ + "FMINNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnm[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNM" + ], + [ + "FMINNM" + ], + [ + "FMINNM" + ], + [ + "MOVPRFX", + "FMINNM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnm[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMINNM" + ], + [ + "MOVPRFX", + "FMINNM" + ], + [ + "MOVPRFX", + "FMINNM" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminnmp[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNMP" + ], + [ + "MOVPRFX", + "FMINNMP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminnmp[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNMP" + ], + [ + "MOVPRFX", + "FMINNMP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminnmp[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNMP" + ], + [ + "MOVPRFX", + "FMINNMP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminnmp[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNMP" + ], + [ + "MOVPRFX", + "FMINNMP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminnmp[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNMP" + ], + [ + "MOVPRFX", + "FMINNMP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminnmp[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNMP" + ], + [ + "MOVPRFX", + "FMINNMP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnmv[_f16]", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNMV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnmv[_f32]", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "float32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNMV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminnmv[_f64]", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "float64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINNMV" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINP" + ], + [ + "MOVPRFX", + "FMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINP" + ], + [ + "MOVPRFX", + "FMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINP" + ], + [ + "MOVPRFX", + "FMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINP" + ], + [ + "MOVPRFX", + "FMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINP" + ], + [ + "MOVPRFX", + "FMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINP" + ], + [ + "MOVPRFX", + "FMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMINP" + ], + [ + "MOVPRFX", + "SMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMINP" + ], + [ + "MOVPRFX", + "SMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMINP" + ], + [ + "MOVPRFX", + "SMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMINP" + ], + [ + "MOVPRFX", + "SMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMINP" + ], + [ + "MOVPRFX", + "SMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMINP" + ], + [ + "MOVPRFX", + "SMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMINP" + ], + [ + "MOVPRFX", + "SMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMINP" + ], + [ + "MOVPRFX", + "SMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMINP" + ], + [ + "MOVPRFX", + "UMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMINP" + ], + [ + "MOVPRFX", + "UMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMINP" + ], + [ + "MOVPRFX", + "UMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMINP" + ], + [ + "MOVPRFX", + "UMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMINP" + ], + [ + "MOVPRFX", + "UMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMINP" + ], + [ + "MOVPRFX", + "UMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMINP" + ], + [ + "MOVPRFX", + "UMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svminp[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMINP" + ], + [ + "MOVPRFX", + "UMINP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminv[_f16]", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "float16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminv[_f32]", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "float32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminv[_f64]", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "float64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMINV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminv[_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "int16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMINV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminv[_s32]", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMINV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminv[_s64]", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMINV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminv[_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "int8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMINV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminv[_u16]", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMINV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminv[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMINV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminv[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMINV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svminv[_u8]", + "arguments": [ + "svbool_t pg", + "svuint8_t op" + ], + "return_type": { + "value": "uint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMINV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLA" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H|Ztied3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLA" + ], + [ + "FMAD" + ], + [ + "FMAD" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMLA" + ], + [ + "MOVPRFX", + "FMAD" + ], + [ + "MOVPRFX", + "FMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLA" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S|Ztied3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLA" + ], + [ + "FMAD" + ], + [ + "FMAD" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMLA" + ], + [ + "MOVPRFX", + "FMAD" + ], + [ + "MOVPRFX", + "FMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLA" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D|Ztied3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLA" + ], + [ + "FMAD" + ], + [ + "FMAD" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMLA" + ], + [ + "MOVPRFX", + "FMAD" + ], + [ + "MOVPRFX", + "FMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLA" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H[*]|Ztied3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLA" + ], + [ + "FMAD" + ], + [ + "FMAD" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMLA" + ], + [ + "MOVPRFX", + "FMAD" + ], + [ + "MOVPRFX", + "FMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLA" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S[*]|Ztied3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLA" + ], + [ + "FMAD" + ], + [ + "FMAD" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMLA" + ], + [ + "MOVPRFX", + "FMAD" + ], + [ + "MOVPRFX", + "FMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLA" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D[*]|Ztied3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLA" + ], + [ + "FMAD" + ], + [ + "FMAD" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMLA" + ], + [ + "MOVPRFX", + "FMAD" + ], + [ + "MOVPRFX", + "FMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H[*]|Ztied3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S[*]|Ztied3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "int64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "int64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D[*]|Ztied3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "int64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B[*]|Ztied3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H[*]|Ztied3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S[*]|Ztied3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D[*]|Ztied3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B[*]|Ztied3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H|Ztied3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S|Ztied3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D|Ztied3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B|Ztied3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H|Ztied3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S|Ztied3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D|Ztied3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B|Ztied3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MAD" + ], + [ + "MAD" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLA" + ], + [ + "MOVPRFX", + "MAD" + ], + [ + "MOVPRFX", + "MAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla_lane[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLA" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla_lane[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLA" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmla_lane[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLA" + ], + [ + "MOVPRFX", + "FMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmla_lane[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmla_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "svint32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmla_lane[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "svint64_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmla_lane[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmla_lane[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmla_lane[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLA" + ], + [ + "MOVPRFX", + "MLA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLALB" + ], + [ + "MOVPRFX", + "FMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb[_n_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLALB" + ], + [ + "MOVPRFX", + "FMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLALB" + ], + [ + "MOVPRFX", + "SMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLALB" + ], + [ + "MOVPRFX", + "SMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLALB" + ], + [ + "MOVPRFX", + "SMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb[_n_u16]", + "arguments": [ + "svuint16_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLALB" + ], + [ + "MOVPRFX", + "UMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb[_n_u32]", + "arguments": [ + "svuint32_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLALB" + ], + [ + "MOVPRFX", + "UMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb[_n_u64]", + "arguments": [ + "svuint64_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLALB" + ], + [ + "MOVPRFX", + "UMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb[_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLALB" + ], + [ + "MOVPRFX", + "SMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLALB" + ], + [ + "MOVPRFX", + "SMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLALB" + ], + [ + "MOVPRFX", + "SMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLALB" + ], + [ + "MOVPRFX", + "UMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLALB" + ], + [ + "MOVPRFX", + "UMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLALB" + ], + [ + "MOVPRFX", + "UMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb_lane[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat16_t op2", + "svfloat16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLALB" + ], + [ + "MOVPRFX", + "FMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLALB" + ], + [ + "MOVPRFX", + "SMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb_lane[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "svint32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLALB" + ], + [ + "MOVPRFX", + "SMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb_lane[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint16_t op2", + "svuint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLALB" + ], + [ + "MOVPRFX", + "UMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalb_lane[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint32_t op2", + "svuint32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLALB" + ], + [ + "MOVPRFX", + "UMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLALT" + ], + [ + "MOVPRFX", + "FMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt[_n_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLALT" + ], + [ + "MOVPRFX", + "FMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLALT" + ], + [ + "MOVPRFX", + "SMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLALT" + ], + [ + "MOVPRFX", + "SMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLALT" + ], + [ + "MOVPRFX", + "SMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt[_n_u16]", + "arguments": [ + "svuint16_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLALT" + ], + [ + "MOVPRFX", + "UMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt[_n_u32]", + "arguments": [ + "svuint32_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLALT" + ], + [ + "MOVPRFX", + "UMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt[_n_u64]", + "arguments": [ + "svuint64_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLALT" + ], + [ + "MOVPRFX", + "UMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt[_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLALT" + ], + [ + "MOVPRFX", + "SMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLALT" + ], + [ + "MOVPRFX", + "SMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLALT" + ], + [ + "MOVPRFX", + "SMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLALT" + ], + [ + "MOVPRFX", + "UMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLALT" + ], + [ + "MOVPRFX", + "UMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLALT" + ], + [ + "MOVPRFX", + "UMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt_lane[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat16_t op2", + "svfloat16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLALT" + ], + [ + "MOVPRFX", + "FMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLALT" + ], + [ + "MOVPRFX", + "SMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt_lane[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "svint32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLALT" + ], + [ + "MOVPRFX", + "SMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt_lane[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint16_t op2", + "svuint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLALT" + ], + [ + "MOVPRFX", + "UMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlalt_lane[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint32_t op2", + "svuint32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLALT" + ], + [ + "MOVPRFX", + "UMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLS" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H|Ztied3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLS" + ], + [ + "FMSB" + ], + [ + "FMSB" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMLS" + ], + [ + "MOVPRFX", + "FMSB" + ], + [ + "MOVPRFX", + "FMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLS" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S|Ztied3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLS" + ], + [ + "FMSB" + ], + [ + "FMSB" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMLS" + ], + [ + "MOVPRFX", + "FMSB" + ], + [ + "MOVPRFX", + "FMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLS" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D|Ztied3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLS" + ], + [ + "FMSB" + ], + [ + "FMSB" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMLS" + ], + [ + "MOVPRFX", + "FMSB" + ], + [ + "MOVPRFX", + "FMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLS" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H[*]|Ztied3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLS" + ], + [ + "FMSB" + ], + [ + "FMSB" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMLS" + ], + [ + "MOVPRFX", + "FMSB" + ], + [ + "MOVPRFX", + "FMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLS" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S[*]|Ztied3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLS" + ], + [ + "FMSB" + ], + [ + "FMSB" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMLS" + ], + [ + "MOVPRFX", + "FMSB" + ], + [ + "MOVPRFX", + "FMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLS" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D[*]|Ztied3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLS" + ], + [ + "FMSB" + ], + [ + "FMSB" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMLS" + ], + [ + "MOVPRFX", + "FMSB" + ], + [ + "MOVPRFX", + "FMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H[*]|Ztied3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S[*]|Ztied3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "int64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "int64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D[*]|Ztied3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "int64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B[*]|Ztied3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H[*]|Ztied3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S[*]|Ztied3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D[*]|Ztied3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B[*]|Ztied3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H|Ztied3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S|Ztied3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D|Ztied3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B|Ztied3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H|Ztied3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S|Ztied3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D|Ztied3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B|Ztied3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls_lane[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLS" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls_lane[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLS" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmls_lane[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLS" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmls_lane[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmls_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "svint32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmls_lane[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "svint64_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmls_lane[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmls_lane[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmls_lane[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MLS" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLSLB" + ], + [ + "MOVPRFX", + "FMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb[_n_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLSLB" + ], + [ + "MOVPRFX", + "FMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLSLB" + ], + [ + "MOVPRFX", + "SMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLSLB" + ], + [ + "MOVPRFX", + "SMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLSLB" + ], + [ + "MOVPRFX", + "SMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb[_n_u16]", + "arguments": [ + "svuint16_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLSLB" + ], + [ + "MOVPRFX", + "UMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb[_n_u32]", + "arguments": [ + "svuint32_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLSLB" + ], + [ + "MOVPRFX", + "UMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb[_n_u64]", + "arguments": [ + "svuint64_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLSLB" + ], + [ + "MOVPRFX", + "UMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb[_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLSLB" + ], + [ + "MOVPRFX", + "SMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLSLB" + ], + [ + "MOVPRFX", + "SMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLSLB" + ], + [ + "MOVPRFX", + "SMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLSLB" + ], + [ + "MOVPRFX", + "UMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLSLB" + ], + [ + "MOVPRFX", + "UMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLSLB" + ], + [ + "MOVPRFX", + "UMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb_lane[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat16_t op2", + "svfloat16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLSLB" + ], + [ + "MOVPRFX", + "FMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLSLB" + ], + [ + "MOVPRFX", + "SMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb_lane[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "svint32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLSLB" + ], + [ + "MOVPRFX", + "SMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb_lane[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint16_t op2", + "svuint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLSLB" + ], + [ + "MOVPRFX", + "UMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslb_lane[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint32_t op2", + "svuint32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLSLB" + ], + [ + "MOVPRFX", + "UMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLSLT" + ], + [ + "MOVPRFX", + "FMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt[_n_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLSLT" + ], + [ + "MOVPRFX", + "FMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLSLT" + ], + [ + "MOVPRFX", + "SMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLSLT" + ], + [ + "MOVPRFX", + "SMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLSLT" + ], + [ + "MOVPRFX", + "SMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt[_n_u16]", + "arguments": [ + "svuint16_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLSLT" + ], + [ + "MOVPRFX", + "UMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt[_n_u32]", + "arguments": [ + "svuint32_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLSLT" + ], + [ + "MOVPRFX", + "UMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt[_n_u64]", + "arguments": [ + "svuint64_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLSLT" + ], + [ + "MOVPRFX", + "UMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt[_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLSLT" + ], + [ + "MOVPRFX", + "SMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLSLT" + ], + [ + "MOVPRFX", + "SMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLSLT" + ], + [ + "MOVPRFX", + "SMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLSLT" + ], + [ + "MOVPRFX", + "UMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLSLT" + ], + [ + "MOVPRFX", + "UMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLSLT" + ], + [ + "MOVPRFX", + "UMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt_lane[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat16_t op2", + "svfloat16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMLSLT" + ], + [ + "MOVPRFX", + "FMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLSLT" + ], + [ + "MOVPRFX", + "SMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt_lane[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "svint32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMLSLT" + ], + [ + "MOVPRFX", + "SMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt_lane[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint16_t op2", + "svuint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLSLT" + ], + [ + "MOVPRFX", + "UMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmlslt_lane[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint32_t op2", + "svuint32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMLSLT" + ], + [ + "MOVPRFX", + "UMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmmla[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMMLA" + ], + [ + "MOVPRFX", + "FMMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmmla[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMMLA" + ], + [ + "MOVPRFX", + "FMMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmmla[_s32]", + "arguments": [ + "svint32_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMMLA" + ], + [ + "MOVPRFX", + "SMMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmmla[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMMLA" + ], + [ + "MOVPRFX", + "UMMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmov[_b]_z", + "arguments": [ + "svbool_t pg", + "svbool_t op" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Pop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "AND" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmovlb[_s16]", + "arguments": [ + "svint8_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSHLLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmovlb[_s32]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSHLLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmovlb[_s64]", + "arguments": [ + "svint32_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSHLLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmovlb[_u16]", + "arguments": [ + "svuint8_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USHLLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmovlb[_u32]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USHLLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmovlb[_u64]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USHLLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmovlt[_s16]", + "arguments": [ + "svint8_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSHLLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmovlt[_s32]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSHLLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmovlt[_s64]", + "arguments": [ + "svint32_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSHLLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmovlt[_u16]", + "arguments": [ + "svuint8_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USHLLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmovlt[_u32]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USHLLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmovlt[_u64]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USHLLT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMSB" + ], + [ + "MOVPRFX", + "FMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H|Ztied3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMSB" + ], + [ + "FMSB" + ], + [ + "FMLS" + ], + [ + "MOVPRFX", + "FMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMSB" + ], + [ + "MOVPRFX", + "FMSB" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMSB" + ], + [ + "MOVPRFX", + "FMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S|Ztied3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMSB" + ], + [ + "FMSB" + ], + [ + "FMLS" + ], + [ + "MOVPRFX", + "FMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMSB" + ], + [ + "MOVPRFX", + "FMSB" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMSB" + ], + [ + "MOVPRFX", + "FMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D|Ztied3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMSB" + ], + [ + "FMSB" + ], + [ + "FMLS" + ], + [ + "MOVPRFX", + "FMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMSB" + ], + [ + "MOVPRFX", + "FMSB" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMSB" + ], + [ + "MOVPRFX", + "FMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H[*]|Ztied3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMSB" + ], + [ + "FMSB" + ], + [ + "FMLS" + ], + [ + "MOVPRFX", + "FMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMSB" + ], + [ + "MOVPRFX", + "FMSB" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMSB" + ], + [ + "MOVPRFX", + "FMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S[*]|Ztied3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMSB" + ], + [ + "FMSB" + ], + [ + "FMLS" + ], + [ + "MOVPRFX", + "FMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMSB" + ], + [ + "MOVPRFX", + "FMSB" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMSB" + ], + [ + "MOVPRFX", + "FMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D[*]|Ztied3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMSB" + ], + [ + "FMSB" + ], + [ + "FMLS" + ], + [ + "MOVPRFX", + "FMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMSB" + ], + [ + "MOVPRFX", + "FMSB" + ], + [ + "MOVPRFX", + "FMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H[*]|Ztied3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S[*]|Ztied3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "int64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "int64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D[*]|Ztied3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "int64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B[*]|Ztied3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H[*]|Ztied3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S[*]|Ztied3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D[*]|Ztied3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B[*]|Ztied3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H|Ztied3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S|Ztied3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D|Ztied3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B|Ztied3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H|Ztied3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S|Ztied3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D|Ztied3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "op3": { + "register": "Zop3.B|Ztied3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MSB" + ], + [ + "MSB" + ], + [ + "MLS" + ], + [ + "MOVPRFX", + "MSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmsb[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MSB" + ], + [ + "MOVPRFX", + "MLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMUL" + ], + [ + "MOVPRFX", + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMUL" + ], + [ + "FMUL" + ], + [ + "FMUL" + ], + [ + "MOVPRFX", + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMUL" + ], + [ + "MOVPRFX", + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMUL" + ], + [ + "MOVPRFX", + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMUL" + ], + [ + "FMUL" + ], + [ + "FMUL" + ], + [ + "MOVPRFX", + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMUL" + ], + [ + "MOVPRFX", + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMUL" + ], + [ + "MOVPRFX", + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMUL" + ], + [ + "FMUL" + ], + [ + "FMUL" + ], + [ + "MOVPRFX", + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMUL" + ], + [ + "MOVPRFX", + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMUL" + ], + [ + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMUL" + ], + [ + "FMUL" + ], + [ + "FMUL" + ], + [ + "FMUL" + ], + [ + "MOVPRFX", + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMUL" + ], + [ + "MOVPRFX", + "FMUL" + ], + [ + "MOVPRFX", + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMUL" + ], + [ + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMUL" + ], + [ + "FMUL" + ], + [ + "FMUL" + ], + [ + "FMUL" + ], + [ + "MOVPRFX", + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMUL" + ], + [ + "MOVPRFX", + "FMUL" + ], + [ + "MOVPRFX", + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMUL" + ], + [ + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMUL" + ], + [ + "FMUL" + ], + [ + "FMUL" + ], + [ + "FMUL" + ], + [ + "MOVPRFX", + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMUL" + ], + [ + "MOVPRFX", + "FMUL" + ], + [ + "MOVPRFX", + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ], + [ + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "MUL" + ], + [ + "MOVPRFX", + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul_lane[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul_lane[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmul_lane[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmul_lane[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmul_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmul_lane[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmul_lane[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmul_lane[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmul_lane[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULH" + ], + [ + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULH" + ], + [ + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULH" + ], + [ + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULH" + ], + [ + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULH" + ], + [ + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULH" + ], + [ + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULH" + ], + [ + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULH" + ], + [ + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULH" + ], + [ + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULH" + ], + [ + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULH" + ], + [ + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULH" + ], + [ + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SMULH" + ], + [ + "MOVPRFX", + "SMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULH" + ], + [ + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULH" + ], + [ + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULH" + ], + [ + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULH" + ], + [ + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulh[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UMULH" + ], + [ + "MOVPRFX", + "UMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullb[_n_s16]", + "arguments": [ + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullb[_n_s32]", + "arguments": [ + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullb[_n_s64]", + "arguments": [ + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullb[_n_u16]", + "arguments": [ + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullb[_n_u32]", + "arguments": [ + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullb[_n_u64]", + "arguments": [ + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullb[_s16]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullb[_s32]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullb[_s64]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullb[_u16]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullb[_u32]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullb[_u64]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullb_lane[_s32]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullb_lane[_s64]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullb_lane[_u32]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullb_lane[_u64]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullt[_n_s16]", + "arguments": [ + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullt[_n_s32]", + "arguments": [ + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullt[_n_s64]", + "arguments": [ + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullt[_n_u16]", + "arguments": [ + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullt[_n_u32]", + "arguments": [ + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullt[_n_u64]", + "arguments": [ + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullt[_s16]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullt[_s32]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullt[_s64]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullt[_u16]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullt[_u32]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullt[_u64]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullt_lane[_s32]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullt_lane[_s64]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullt_lane[_u32]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svmullt_lane[_u64]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulx[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMULX" + ], + [ + "MOVPRFX", + "FMULX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulx[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMULX" + ], + [ + "FMULX" + ], + [ + "MOVPRFX", + "FMULX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulx[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMULX" + ], + [ + "MOVPRFX", + "FMULX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulx[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMULX" + ], + [ + "MOVPRFX", + "FMULX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulx[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMULX" + ], + [ + "FMULX" + ], + [ + "MOVPRFX", + "FMULX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulx[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMULX" + ], + [ + "MOVPRFX", + "FMULX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulx[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMULX" + ], + [ + "MOVPRFX", + "FMULX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulx[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMULX" + ], + [ + "FMULX" + ], + [ + "MOVPRFX", + "FMULX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulx[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMULX" + ], + [ + "MOVPRFX", + "FMULX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulx[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMULX" + ], + [ + "MOVPRFX", + "FMULX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulx[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMULX" + ], + [ + "FMULX" + ], + [ + "MOVPRFX", + "FMULX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulx[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMULX" + ], + [ + "MOVPRFX", + "FMULX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulx[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMULX" + ], + [ + "MOVPRFX", + "FMULX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulx[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMULX" + ], + [ + "FMULX" + ], + [ + "MOVPRFX", + "FMULX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulx[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMULX" + ], + [ + "MOVPRFX", + "FMULX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulx[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMULX" + ], + [ + "MOVPRFX", + "FMULX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulx[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FMULX" + ], + [ + "FMULX" + ], + [ + "MOVPRFX", + "FMULX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svmulx[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FMULX" + ], + [ + "MOVPRFX", + "FMULX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnand[_b]_z", + "arguments": [ + "svbool_t pg", + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.B" + }, + "op2": { + "register": "Pop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NAND" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnbsl[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NBSL" + ], + [ + "MOVPRFX", + "NBSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnbsl[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NBSL" + ], + [ + "MOVPRFX", + "NBSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnbsl[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "int64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NBSL" + ], + [ + "MOVPRFX", + "NBSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnbsl[_n_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NBSL" + ], + [ + "MOVPRFX", + "NBSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnbsl[_n_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "uint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NBSL" + ], + [ + "MOVPRFX", + "NBSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnbsl[_n_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NBSL" + ], + [ + "MOVPRFX", + "NBSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnbsl[_n_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NBSL" + ], + [ + "MOVPRFX", + "NBSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnbsl[_n_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NBSL" + ], + [ + "MOVPRFX", + "NBSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnbsl[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NBSL" + ], + [ + "MOVPRFX", + "NBSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnbsl[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NBSL" + ], + [ + "MOVPRFX", + "NBSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnbsl[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NBSL" + ], + [ + "MOVPRFX", + "NBSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnbsl[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NBSL" + ], + [ + "MOVPRFX", + "NBSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnbsl[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "svuint16_t op3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NBSL" + ], + [ + "MOVPRFX", + "NBSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnbsl[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NBSL" + ], + [ + "MOVPRFX", + "NBSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnbsl[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NBSL" + ], + [ + "MOVPRFX", + "NBSL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnbsl[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NBSL" + ], + [ + "MOVPRFX", + "NBSL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_f16]_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNEG" + ], + [ + "MOVPRFX", + "FNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNEG" + ], + [ + "MOVPRFX", + "FNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_f32]_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNEG" + ], + [ + "MOVPRFX", + "FNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNEG" + ], + [ + "MOVPRFX", + "FNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_f64]_m", + "arguments": [ + "svfloat64_t inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNEG" + ], + [ + "MOVPRFX", + "FNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNEG" + ], + [ + "MOVPRFX", + "FNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_s16]_m", + "arguments": [ + "svint16_t inactive", + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NEG" + ], + [ + "MOVPRFX", + "NEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NEG" + ], + [ + "MOVPRFX", + "NEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "NEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_s32]_m", + "arguments": [ + "svint32_t inactive", + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NEG" + ], + [ + "MOVPRFX", + "NEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NEG" + ], + [ + "MOVPRFX", + "NEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "NEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_s64]_m", + "arguments": [ + "svint64_t inactive", + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NEG" + ], + [ + "MOVPRFX", + "NEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NEG" + ], + [ + "MOVPRFX", + "NEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "NEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_s8]_m", + "arguments": [ + "svint8_t inactive", + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.B|Ztied.B" + }, + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NEG" + ], + [ + "MOVPRFX", + "NEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B|Ztied.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NEG" + ], + [ + "MOVPRFX", + "NEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svneg[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "NEG" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmad[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMAD" + ], + [ + "MOVPRFX", + "FNMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmad[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H|Ztied3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMAD" + ], + [ + "FNMAD" + ], + [ + "FNMLA" + ], + [ + "MOVPRFX", + "FNMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmad[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMAD" + ], + [ + "MOVPRFX", + "FNMAD" + ], + [ + "MOVPRFX", + "FNMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmad[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMAD" + ], + [ + "MOVPRFX", + "FNMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmad[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S|Ztied3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMAD" + ], + [ + "FNMAD" + ], + [ + "FNMLA" + ], + [ + "MOVPRFX", + "FNMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmad[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMAD" + ], + [ + "MOVPRFX", + "FNMAD" + ], + [ + "MOVPRFX", + "FNMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmad[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMAD" + ], + [ + "MOVPRFX", + "FNMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmad[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D|Ztied3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMAD" + ], + [ + "FNMAD" + ], + [ + "FNMLA" + ], + [ + "MOVPRFX", + "FNMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmad[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMAD" + ], + [ + "MOVPRFX", + "FNMAD" + ], + [ + "MOVPRFX", + "FNMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmad[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMAD" + ], + [ + "MOVPRFX", + "FNMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmad[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H[*]|Ztied3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMAD" + ], + [ + "FNMAD" + ], + [ + "FNMLA" + ], + [ + "MOVPRFX", + "FNMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmad[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMAD" + ], + [ + "MOVPRFX", + "FNMAD" + ], + [ + "MOVPRFX", + "FNMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmad[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMAD" + ], + [ + "MOVPRFX", + "FNMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmad[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S[*]|Ztied3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMAD" + ], + [ + "FNMAD" + ], + [ + "FNMLA" + ], + [ + "MOVPRFX", + "FNMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmad[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMAD" + ], + [ + "MOVPRFX", + "FNMAD" + ], + [ + "MOVPRFX", + "FNMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmad[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMAD" + ], + [ + "MOVPRFX", + "FNMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmad[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D[*]|Ztied3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMAD" + ], + [ + "FNMAD" + ], + [ + "FNMLA" + ], + [ + "MOVPRFX", + "FNMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmad[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMAD" + ], + [ + "MOVPRFX", + "FNMAD" + ], + [ + "MOVPRFX", + "FNMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnmatch[_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NMATCH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnmatch[_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NMATCH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnmatch[_u16]", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NMATCH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svnmatch[_u8]", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NMATCH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmla[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLA" + ], + [ + "MOVPRFX", + "FNMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmla[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H|Ztied3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLA" + ], + [ + "FNMAD" + ], + [ + "FNMAD" + ], + [ + "MOVPRFX", + "FNMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmla[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMLA" + ], + [ + "MOVPRFX", + "FNMAD" + ], + [ + "MOVPRFX", + "FNMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmla[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLA" + ], + [ + "MOVPRFX", + "FNMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmla[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S|Ztied3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLA" + ], + [ + "FNMAD" + ], + [ + "FNMAD" + ], + [ + "MOVPRFX", + "FNMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmla[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMLA" + ], + [ + "MOVPRFX", + "FNMAD" + ], + [ + "MOVPRFX", + "FNMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmla[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLA" + ], + [ + "MOVPRFX", + "FNMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmla[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D|Ztied3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLA" + ], + [ + "FNMAD" + ], + [ + "FNMAD" + ], + [ + "MOVPRFX", + "FNMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmla[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMLA" + ], + [ + "MOVPRFX", + "FNMAD" + ], + [ + "MOVPRFX", + "FNMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmla[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLA" + ], + [ + "MOVPRFX", + "FNMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmla[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H[*]|Ztied3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLA" + ], + [ + "FNMAD" + ], + [ + "FNMAD" + ], + [ + "MOVPRFX", + "FNMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmla[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMLA" + ], + [ + "MOVPRFX", + "FNMAD" + ], + [ + "MOVPRFX", + "FNMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmla[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLA" + ], + [ + "MOVPRFX", + "FNMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmla[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S[*]|Ztied3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLA" + ], + [ + "FNMAD" + ], + [ + "FNMAD" + ], + [ + "MOVPRFX", + "FNMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmla[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMLA" + ], + [ + "MOVPRFX", + "FNMAD" + ], + [ + "MOVPRFX", + "FNMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmla[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLA" + ], + [ + "MOVPRFX", + "FNMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmla[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D[*]|Ztied3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLA" + ], + [ + "FNMAD" + ], + [ + "FNMAD" + ], + [ + "MOVPRFX", + "FNMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmla[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMLA" + ], + [ + "MOVPRFX", + "FNMAD" + ], + [ + "MOVPRFX", + "FNMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmls[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLS" + ], + [ + "MOVPRFX", + "FNMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmls[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H|Ztied3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLS" + ], + [ + "FNMSB" + ], + [ + "FNMSB" + ], + [ + "MOVPRFX", + "FNMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmls[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMLS" + ], + [ + "MOVPRFX", + "FNMSB" + ], + [ + "MOVPRFX", + "FNMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmls[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLS" + ], + [ + "MOVPRFX", + "FNMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmls[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S|Ztied3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLS" + ], + [ + "FNMSB" + ], + [ + "FNMSB" + ], + [ + "MOVPRFX", + "FNMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmls[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMLS" + ], + [ + "MOVPRFX", + "FNMSB" + ], + [ + "MOVPRFX", + "FNMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmls[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLS" + ], + [ + "MOVPRFX", + "FNMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmls[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D|Ztied3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLS" + ], + [ + "FNMSB" + ], + [ + "FNMSB" + ], + [ + "MOVPRFX", + "FNMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmls[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMLS" + ], + [ + "MOVPRFX", + "FNMSB" + ], + [ + "MOVPRFX", + "FNMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmls[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLS" + ], + [ + "MOVPRFX", + "FNMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmls[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H[*]|Ztied3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLS" + ], + [ + "FNMSB" + ], + [ + "FNMSB" + ], + [ + "MOVPRFX", + "FNMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmls[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMLS" + ], + [ + "MOVPRFX", + "FNMSB" + ], + [ + "MOVPRFX", + "FNMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmls[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLS" + ], + [ + "MOVPRFX", + "FNMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmls[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S[*]|Ztied3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLS" + ], + [ + "FNMSB" + ], + [ + "FNMSB" + ], + [ + "MOVPRFX", + "FNMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmls[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMLS" + ], + [ + "MOVPRFX", + "FNMSB" + ], + [ + "MOVPRFX", + "FNMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmls[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLS" + ], + [ + "MOVPRFX", + "FNMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmls[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D[*]|Ztied3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMLS" + ], + [ + "FNMSB" + ], + [ + "FNMSB" + ], + [ + "MOVPRFX", + "FNMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmls[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMLS" + ], + [ + "MOVPRFX", + "FNMSB" + ], + [ + "MOVPRFX", + "FNMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmsb[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMSB" + ], + [ + "MOVPRFX", + "FNMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmsb[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H|Ztied3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMSB" + ], + [ + "FNMSB" + ], + [ + "FNMLS" + ], + [ + "MOVPRFX", + "FNMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmsb[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "svfloat16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMSB" + ], + [ + "MOVPRFX", + "FNMSB" + ], + [ + "MOVPRFX", + "FNMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmsb[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMSB" + ], + [ + "MOVPRFX", + "FNMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmsb[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S|Ztied3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMSB" + ], + [ + "FNMSB" + ], + [ + "FNMLS" + ], + [ + "MOVPRFX", + "FNMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmsb[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "svfloat32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMSB" + ], + [ + "MOVPRFX", + "FNMSB" + ], + [ + "MOVPRFX", + "FNMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmsb[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMSB" + ], + [ + "MOVPRFX", + "FNMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmsb[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D|Ztied3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMSB" + ], + [ + "FNMSB" + ], + [ + "FNMLS" + ], + [ + "MOVPRFX", + "FNMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmsb[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "svfloat64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMSB" + ], + [ + "MOVPRFX", + "FNMSB" + ], + [ + "MOVPRFX", + "FNMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmsb[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMSB" + ], + [ + "MOVPRFX", + "FNMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmsb[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "op3": { + "register": "Zop3.H[*]|Ztied3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMSB" + ], + [ + "FNMSB" + ], + [ + "FNMLS" + ], + [ + "MOVPRFX", + "FNMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmsb[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2", + "float16_t op3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMSB" + ], + [ + "MOVPRFX", + "FNMSB" + ], + [ + "MOVPRFX", + "FNMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmsb[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMSB" + ], + [ + "MOVPRFX", + "FNMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmsb[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "op3": { + "register": "Zop3.S[*]|Ztied3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMSB" + ], + [ + "FNMSB" + ], + [ + "FNMLS" + ], + [ + "MOVPRFX", + "FNMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmsb[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2", + "float32_t op3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMSB" + ], + [ + "MOVPRFX", + "FNMSB" + ], + [ + "MOVPRFX", + "FNMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmsb[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMSB" + ], + [ + "MOVPRFX", + "FNMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmsb[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "op3": { + "register": "Zop3.D[*]|Ztied3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FNMSB" + ], + [ + "FNMSB" + ], + [ + "FNMLS" + ], + [ + "MOVPRFX", + "FNMSB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnmsb[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2", + "float64_t op3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FNMSB" + ], + [ + "MOVPRFX", + "FNMSB" + ], + [ + "MOVPRFX", + "FNMLS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnor[_b]_z", + "arguments": [ + "svbool_t pg", + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.B" + }, + "op2": { + "register": "Pop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_b]_z", + "arguments": [ + "svbool_t pg", + "svbool_t op" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Pop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "EOR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_s16]_m", + "arguments": [ + "svint16_t inactive", + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NOT" + ], + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NOT" + ], + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_s32]_m", + "arguments": [ + "svint32_t inactive", + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NOT" + ], + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NOT" + ], + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_s64]_m", + "arguments": [ + "svint64_t inactive", + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NOT" + ], + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NOT" + ], + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_s8]_m", + "arguments": [ + "svint8_t inactive", + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.B|Ztied.B" + }, + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NOT" + ], + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B|Ztied.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NOT" + ], + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_u16]_m", + "arguments": [ + "svuint16_t inactive", + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NOT" + ], + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NOT" + ], + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_u32]_m", + "arguments": [ + "svuint32_t inactive", + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NOT" + ], + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NOT" + ], + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_u64]_m", + "arguments": [ + "svuint64_t inactive", + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NOT" + ], + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NOT" + ], + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_u8]_m", + "arguments": [ + "svuint8_t inactive", + "svbool_t pg", + "svuint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.B|Ztied.B" + }, + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NOT" + ], + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B|Ztied.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "NOT" + ], + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svnot[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "NOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorn[_b]_z", + "arguments": [ + "svbool_t pg", + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.B" + }, + "op2": { + "register": "Pop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_b]_z", + "arguments": [ + "svbool_t pg", + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.B" + }, + "op2": { + "register": "Pop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORR" + ], + [ + "ORR" + ], + [ + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorr[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "ORR" + ], + [ + "MOVPRFX", + "ORR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorv[_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "int16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorv[_s32]", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorv[_s64]", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorv[_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "int8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorv[_u16]", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "uint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorv[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorv[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svorv[_u8]", + "arguments": [ + "svbool_t pg", + "svuint8_t op" + ], + "return_type": { + "value": "uint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ORV" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpext_lane_c16", + "arguments": [ + "svcount_t pnn", + "uint64_t imm" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "imm": { + "immediate": "imm1" + }, + "pnn": { + "register": "PNreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PEXT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpext_lane_c16_x2", + "arguments": [ + "svcount_t pnn", + "uint64_t imm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "imm": { + "immediate": "imm1" + }, + "pnn": { + "register": "PNreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PEXT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpext_lane_c32", + "arguments": [ + "svcount_t pnn", + "uint64_t imm" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "imm": { + "immediate": "imm1" + }, + "pnn": { + "register": "PNreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PEXT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpext_lane_c32_x2", + "arguments": [ + "svcount_t pnn", + "uint64_t imm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "imm": { + "immediate": "imm1" + }, + "pnn": { + "register": "PNreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PEXT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpext_lane_c64", + "arguments": [ + "svcount_t pnn", + "uint64_t imm" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "imm": { + "immediate": "imm1" + }, + "pnn": { + "register": "PNreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PEXT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpext_lane_c64_x2", + "arguments": [ + "svcount_t pnn", + "uint64_t imm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "imm": { + "immediate": "imm1" + }, + "pnn": { + "register": "PNreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PEXT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpext_lane_c8", + "arguments": [ + "svcount_t pnn", + "uint64_t imm" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "imm": { + "immediate": "imm1" + }, + "pnn": { + "register": "PNreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PEXT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpext_lane_c8_x2", + "arguments": [ + "svcount_t pnn", + "uint64_t imm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "imm": { + "immediate": "imm1" + }, + "pnn": { + "register": "PNreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PEXT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svpfalse[_b]", + "arguments": [], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PFALSE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpfalse_c", + "arguments": [], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PFALSE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svpfirst[_b]", + "arguments": [ + "svbool_t pg", + "svbool_t op" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Ptied.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PFIRST" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmul[_n_u8]", + "arguments": [ + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmul[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullb[_n_u16]", + "arguments": [ + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullb[_n_u64]", + "arguments": [ + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullb[_u16]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullb[_u64]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullb_pair[_n_u32]", + "arguments": [ + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullb_pair[_n_u64]", + "arguments": [ + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullb_pair[_n_u8]", + "arguments": [ + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullb_pair[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullb_pair[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullb_pair[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullt[_n_u16]", + "arguments": [ + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullt[_n_u64]", + "arguments": [ + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullt[_u16]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullt[_u64]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullt_pair[_n_u32]", + "arguments": [ + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullt_pair[_n_u64]", + "arguments": [ + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullt_pair[_n_u8]", + "arguments": [ + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullt_pair[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullt_pair[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpmullt_pair[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svpnext_b16", + "arguments": [ + "svbool_t pg", + "svbool_t op" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Ptied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PNEXT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svpnext_b32", + "arguments": [ + "svbool_t pg", + "svbool_t op" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Ptied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PNEXT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svpnext_b64", + "arguments": [ + "svbool_t pg", + "svbool_t op" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Ptied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PNEXT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svpnext_b8", + "arguments": [ + "svbool_t pg", + "svbool_t op" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Ptied.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PNEXT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfb", + "arguments": [ + "svbool_t pg", + "const void *base", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFB" + ], + [ + "PRFB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfb_gather[_u32base]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfb_gather[_u32base]_offset", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFB" + ], + [ + "PRFB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfb_gather[_u64base]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfb_gather[_u64base]_offset", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFB" + ], + [ + "PRFB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfb_gather_[s32]offset", + "arguments": [ + "svbool_t pg", + "const void *base", + "svint32_t offsets", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfb_gather_[s64]offset", + "arguments": [ + "svbool_t pg", + "const void *base", + "svint64_t offsets", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfb_gather_[u32]offset", + "arguments": [ + "svbool_t pg", + "const void *base", + "svuint32_t offsets", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfb_gather_[u64]offset", + "arguments": [ + "svbool_t pg", + "const void *base", + "svuint64_t offsets", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfb_vnum", + "arguments": [ + "svbool_t pg", + "const void *base", + "int64_t vnum", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFB" + ], + [ + "PRFB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfd", + "arguments": [ + "svbool_t pg", + "const void *base", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFD" + ], + [ + "PRFD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfd_gather[_u32base]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfd_gather[_u32base]_index", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 8": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFD" + ], + [ + "PRFB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfd_gather[_u64base]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfd_gather[_u64base]_index", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 8": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFD" + ], + [ + "PRFB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfd_gather_[s32]index", + "arguments": [ + "svbool_t pg", + "const void *base", + "svint32_t indices", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfd_gather_[s64]index", + "arguments": [ + "svbool_t pg", + "const void *base", + "svint64_t indices", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfd_gather_[u32]index", + "arguments": [ + "svbool_t pg", + "const void *base", + "svuint32_t indices", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfd_gather_[u64]index", + "arguments": [ + "svbool_t pg", + "const void *base", + "svuint64_t indices", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfd_vnum", + "arguments": [ + "svbool_t pg", + "const void *base", + "int64_t vnum", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFD" + ], + [ + "PRFD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfh", + "arguments": [ + "svbool_t pg", + "const void *base", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFH" + ], + [ + "PRFH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfh_gather[_u32base]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfh_gather[_u32base]_index", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFH" + ], + [ + "PRFB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfh_gather[_u64base]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfh_gather[_u64base]_index", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFH" + ], + [ + "PRFB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfh_gather_[s32]index", + "arguments": [ + "svbool_t pg", + "const void *base", + "svint32_t indices", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfh_gather_[s64]index", + "arguments": [ + "svbool_t pg", + "const void *base", + "svint64_t indices", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfh_gather_[u32]index", + "arguments": [ + "svbool_t pg", + "const void *base", + "svuint32_t indices", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfh_gather_[u64]index", + "arguments": [ + "svbool_t pg", + "const void *base", + "svuint64_t indices", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfh_vnum", + "arguments": [ + "svbool_t pg", + "const void *base", + "int64_t vnum", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFH" + ], + [ + "PRFH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfw", + "arguments": [ + "svbool_t pg", + "const void *base", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFW" + ], + [ + "PRFW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfw_gather[_u32base]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfw_gather[_u32base]_index", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFW" + ], + [ + "PRFB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfw_gather[_u64base]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfw_gather[_u64base]_index", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFW" + ], + [ + "PRFB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfw_gather_[s32]index", + "arguments": [ + "svbool_t pg", + "const void *base", + "svint32_t indices", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfw_gather_[s64]index", + "arguments": [ + "svbool_t pg", + "const void *base", + "svint64_t indices", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfw_gather_[u32]index", + "arguments": [ + "svbool_t pg", + "const void *base", + "svuint32_t indices", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfw_gather_[u64]index", + "arguments": [ + "svbool_t pg", + "const void *base", + "svuint64_t indices", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svprfw_vnum", + "arguments": [ + "svbool_t pg", + "const void *base", + "int64_t vnum", + "enum svprfop op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PRFW" + ], + [ + "PRFW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpsel_lane_b16", + "arguments": [ + "svbool_t pn", + "svbool_t pm", + "uint32_t idx" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "idx": { + "index": "[Wreg1, imm1]" + }, + "pm": { + "register": "Preg3" + }, + "pn": { + "register": "Preg2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PSEL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpsel_lane_b32", + "arguments": [ + "svbool_t pn", + "svbool_t pm", + "uint32_t idx" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "idx": { + "index": "[Wreg1, imm1]" + }, + "pm": { + "register": "Preg3" + }, + "pn": { + "register": "Preg2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PSEL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpsel_lane_b64", + "arguments": [ + "svbool_t pn", + "svbool_t pm", + "uint32_t idx" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "idx": { + "index": "[Wreg1, imm1]" + }, + "pm": { + "register": "Preg3" + }, + "pn": { + "register": "Preg2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PSEL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpsel_lane_b8", + "arguments": [ + "svbool_t pn", + "svbool_t pm", + "uint32_t idx" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "idx": { + "index": "[Wreg1, imm1]" + }, + "pm": { + "register": "Preg3" + }, + "pn": { + "register": "Preg2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PSEL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpsel_lane_c16", + "arguments": [ + "svcount_t pn", + "svbool_t pm", + "uint32_t idx" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "idx": { + "index": "[Wreg1, imm1]" + }, + "pm": { + "register": "Preg3" + }, + "pn": { + "register": "Preg2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PSEL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpsel_lane_c32", + "arguments": [ + "svcount_t pn", + "svbool_t pm", + "uint32_t idx" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "idx": { + "index": "[Wreg1, imm1]" + }, + "pm": { + "register": "Preg3" + }, + "pn": { + "register": "Preg2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PSEL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpsel_lane_c64", + "arguments": [ + "svcount_t pn", + "svbool_t pm", + "uint32_t idx" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "idx": { + "index": "[Wreg1, imm1]" + }, + "pm": { + "register": "Preg3" + }, + "pn": { + "register": "Preg2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PSEL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svpsel_lane_c8", + "arguments": [ + "svcount_t pn", + "svbool_t pm", + "uint32_t idx" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "idx": { + "index": "[Wreg1, imm1]" + }, + "pm": { + "register": "Preg3" + }, + "pn": { + "register": "Preg2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PSEL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svptest_any", + "arguments": [ + "svbool_t pg", + "svbool_t op" + ], + "return_type": { + "value": "bool" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svptest_first", + "arguments": [ + "svbool_t pg", + "svbool_t op" + ], + "return_type": { + "value": "bool" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svptest_last", + "arguments": [ + "svbool_t pg", + "svbool_t op" + ], + "return_type": { + "value": "bool" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svptrue_b16", + "arguments": [], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PTRUE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svptrue_b32", + "arguments": [], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PTRUE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svptrue_b64", + "arguments": [], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PTRUE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svptrue_b8", + "arguments": [], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PTRUE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svptrue_c16", + "arguments": [], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PTRUE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svptrue_c32", + "arguments": [], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PTRUE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svptrue_c64", + "arguments": [], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PTRUE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svptrue_c8", + "arguments": [], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PTRUE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svptrue_pat_b16", + "arguments": [ + "enum svpattern pattern" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PTRUE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svptrue_pat_b32", + "arguments": [ + "enum svpattern pattern" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PTRUE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svptrue_pat_b64", + "arguments": [ + "enum svpattern pattern" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PTRUE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svptrue_pat_b8", + "arguments": [ + "enum svpattern pattern" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PTRUE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqabs[_s16]_m", + "arguments": [ + "svint16_t inactive", + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQABS" + ], + [ + "MOVPRFX", + "SQABS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqabs[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQABS" + ], + [ + "MOVPRFX", + "SQABS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqabs[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQABS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqabs[_s32]_m", + "arguments": [ + "svint32_t inactive", + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQABS" + ], + [ + "MOVPRFX", + "SQABS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqabs[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQABS" + ], + [ + "MOVPRFX", + "SQABS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqabs[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQABS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqabs[_s64]_m", + "arguments": [ + "svint64_t inactive", + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQABS" + ], + [ + "MOVPRFX", + "SQABS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqabs[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQABS" + ], + [ + "MOVPRFX", + "SQABS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqabs[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQABS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqabs[_s8]_m", + "arguments": [ + "svint8_t inactive", + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.B|Ztied.B" + }, + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQABS" + ], + [ + "MOVPRFX", + "SQABS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqabs[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B|Ztied.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQABS" + ], + [ + "MOVPRFX", + "SQABS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqabs[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQABS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqadd[_n_s16]", + "arguments": [ + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "SQSUB" + ], + [ + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "MOVPRFX", + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "SQSUB" + ], + [ + "SQADD" + ], + [ + "SQADD" + ], + [ + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQADD" + ], + [ + "MOVPRFX", + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqadd[_n_s32]", + "arguments": [ + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "SQSUB" + ], + [ + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "MOVPRFX", + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "SQSUB" + ], + [ + "SQADD" + ], + [ + "SQADD" + ], + [ + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQADD" + ], + [ + "MOVPRFX", + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqadd[_n_s64]", + "arguments": [ + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "SQSUB" + ], + [ + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "MOVPRFX", + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "SQSUB" + ], + [ + "SQADD" + ], + [ + "SQADD" + ], + [ + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQADD" + ], + [ + "MOVPRFX", + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqadd[_n_s8]", + "arguments": [ + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "SQSUB" + ], + [ + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "MOVPRFX", + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "SQSUB" + ], + [ + "SQADD" + ], + [ + "SQADD" + ], + [ + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQADD" + ], + [ + "MOVPRFX", + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqadd[_n_u16]", + "arguments": [ + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "MOVPRFX", + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "UQADD" + ], + [ + "UQADD" + ], + [ + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQADD" + ], + [ + "MOVPRFX", + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqadd[_n_u32]", + "arguments": [ + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "MOVPRFX", + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "UQADD" + ], + [ + "UQADD" + ], + [ + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQADD" + ], + [ + "MOVPRFX", + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqadd[_n_u64]", + "arguments": [ + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "MOVPRFX", + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "UQADD" + ], + [ + "UQADD" + ], + [ + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQADD" + ], + [ + "MOVPRFX", + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqadd[_n_u8]", + "arguments": [ + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "MOVPRFX", + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "UQADD" + ], + [ + "UQADD" + ], + [ + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQADD" + ], + [ + "MOVPRFX", + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqadd[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "MOVPRFX", + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "SQADD" + ], + [ + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQADD" + ], + [ + "MOVPRFX", + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqadd[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "MOVPRFX", + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "SQADD" + ], + [ + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQADD" + ], + [ + "MOVPRFX", + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqadd[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "MOVPRFX", + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "SQADD" + ], + [ + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQADD" + ], + [ + "MOVPRFX", + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqadd[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "MOVPRFX", + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "SQADD" + ], + [ + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQADD" + ], + [ + "MOVPRFX", + "SQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqadd[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "MOVPRFX", + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "UQADD" + ], + [ + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQADD" + ], + [ + "MOVPRFX", + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqadd[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "MOVPRFX", + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "UQADD" + ], + [ + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQADD" + ], + [ + "MOVPRFX", + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqadd[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "MOVPRFX", + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "UQADD" + ], + [ + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQADD" + ], + [ + "MOVPRFX", + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqadd[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "MOVPRFX", + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "UQADD" + ], + [ + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqadd[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQADD" + ], + [ + "MOVPRFX", + "UQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqcadd[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQCADD" + ], + [ + "MOVPRFX", + "SQCADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqcadd[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQCADD" + ], + [ + "MOVPRFX", + "SQCADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqcadd[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQCADD" + ], + [ + "MOVPRFX", + "SQCADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqcadd[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQCADD" + ], + [ + "MOVPRFX", + "SQCADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqcvtn_s16[_s32_x2]", + "arguments": [ + "svint32x2_t zn" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "zn": { + "Z multi-vector": "{ Zreg2.S, Zreg3.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQCVTN" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqcvtn_u16[_s32_x2]", + "arguments": [ + "svint32x2_t zn" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "zn": { + "Z multi-vector": "{ Zreg2.S, Zreg3.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQCVTUN" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqcvtn_u16[_u32_x2]", + "arguments": [ + "svuint32x2_t zn" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "zn": { + "Z multi-vector": "{ Zreg2.S, Zreg3.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQCVTN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecb[_n_s32]", + "arguments": [ + "int32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecb[_n_s64]", + "arguments": [ + "int64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecb[_n_u32]", + "arguments": [ + "uint32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecb[_n_u64]", + "arguments": [ + "uint64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecb_pat[_n_s32]", + "arguments": [ + "int32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecb_pat[_n_s64]", + "arguments": [ + "int64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecb_pat[_n_u32]", + "arguments": [ + "uint32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecb_pat[_n_u64]", + "arguments": [ + "uint64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecd[_n_s32]", + "arguments": [ + "int32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecd[_n_s64]", + "arguments": [ + "int64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecd[_n_u32]", + "arguments": [ + "uint32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecd[_n_u64]", + "arguments": [ + "uint64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecd[_s64]", + "arguments": [ + "svint64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.D|Ztied.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECD" + ], + [ + "MOVPRFX", + "SQDECD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecd[_u64]", + "arguments": [ + "svuint64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.D|Ztied.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECD" + ], + [ + "MOVPRFX", + "UQDECD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecd_pat[_n_s32]", + "arguments": [ + "int32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecd_pat[_n_s64]", + "arguments": [ + "int64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecd_pat[_n_u32]", + "arguments": [ + "uint32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecd_pat[_n_u64]", + "arguments": [ + "uint64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecd_pat[_s64]", + "arguments": [ + "svint64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.D|Ztied.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECD" + ], + [ + "MOVPRFX", + "SQDECD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecd_pat[_u64]", + "arguments": [ + "svuint64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.D|Ztied.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECD" + ], + [ + "MOVPRFX", + "UQDECD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdech[_n_s32]", + "arguments": [ + "int32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdech[_n_s64]", + "arguments": [ + "int64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdech[_n_u32]", + "arguments": [ + "uint32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdech[_n_u64]", + "arguments": [ + "uint64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdech[_s16]", + "arguments": [ + "svint16_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.H|Ztied.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECH" + ], + [ + "MOVPRFX", + "SQDECH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdech[_u16]", + "arguments": [ + "svuint16_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.H|Ztied.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECH" + ], + [ + "MOVPRFX", + "UQDECH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdech_pat[_n_s32]", + "arguments": [ + "int32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdech_pat[_n_s64]", + "arguments": [ + "int64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdech_pat[_n_u32]", + "arguments": [ + "uint32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdech_pat[_n_u64]", + "arguments": [ + "uint64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdech_pat[_s16]", + "arguments": [ + "svint16_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.H|Ztied.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECH" + ], + [ + "MOVPRFX", + "SQDECH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdech_pat[_u16]", + "arguments": [ + "svuint16_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.H|Ztied.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECH" + ], + [ + "MOVPRFX", + "UQDECH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_n_s32]_b16", + "arguments": [ + "int32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Wtied" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_n_s32]_b32", + "arguments": [ + "int32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Wtied" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_n_s32]_b64", + "arguments": [ + "int32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Wtied" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_n_s32]_b8", + "arguments": [ + "int32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Wtied" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_n_s64]_b16", + "arguments": [ + "int64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Xtied" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_n_s64]_b32", + "arguments": [ + "int64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Xtied" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_n_s64]_b64", + "arguments": [ + "int64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Xtied" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_n_s64]_b8", + "arguments": [ + "int64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Xtied" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_n_u32]_b16", + "arguments": [ + "uint32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Wtied" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_n_u32]_b32", + "arguments": [ + "uint32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Wtied" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_n_u32]_b64", + "arguments": [ + "uint32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Wtied" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_n_u32]_b8", + "arguments": [ + "uint32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Wtied" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_n_u64]_b16", + "arguments": [ + "uint64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Xtied" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_n_u64]_b32", + "arguments": [ + "uint64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Xtied" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_n_u64]_b64", + "arguments": [ + "uint64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Xtied" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_n_u64]_b8", + "arguments": [ + "uint64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Xtied" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_s16]", + "arguments": [ + "svint16_t op", + "svbool_t pg" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECP" + ], + [ + "MOVPRFX", + "SQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_s32]", + "arguments": [ + "svint32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECP" + ], + [ + "MOVPRFX", + "SQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_s64]", + "arguments": [ + "svint64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECP" + ], + [ + "MOVPRFX", + "SQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_u16]", + "arguments": [ + "svuint16_t op", + "svbool_t pg" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECP" + ], + [ + "MOVPRFX", + "UQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_u32]", + "arguments": [ + "svuint32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECP" + ], + [ + "MOVPRFX", + "UQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecp[_u64]", + "arguments": [ + "svuint64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECP" + ], + [ + "MOVPRFX", + "UQDECP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecw[_n_s32]", + "arguments": [ + "int32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecw[_n_s64]", + "arguments": [ + "int64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecw[_n_u32]", + "arguments": [ + "uint32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecw[_n_u64]", + "arguments": [ + "uint64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecw[_s32]", + "arguments": [ + "svint32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.S|Ztied.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECW" + ], + [ + "MOVPRFX", + "SQDECW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecw[_u32]", + "arguments": [ + "svuint32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.S|Ztied.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECW" + ], + [ + "MOVPRFX", + "UQDECW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecw_pat[_n_s32]", + "arguments": [ + "int32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecw_pat[_n_s64]", + "arguments": [ + "int64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecw_pat[_n_u32]", + "arguments": [ + "uint32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecw_pat[_n_u64]", + "arguments": [ + "uint64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecw_pat[_s32]", + "arguments": [ + "svint32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.S|Ztied.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDECW" + ], + [ + "MOVPRFX", + "SQDECW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqdecw_pat[_u32]", + "arguments": [ + "svuint32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.S|Ztied.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQDECW" + ], + [ + "MOVPRFX", + "UQDECW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalb[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALB" + ], + [ + "MOVPRFX", + "SQDMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalb[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALB" + ], + [ + "MOVPRFX", + "SQDMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalb[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALB" + ], + [ + "MOVPRFX", + "SQDMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalb[_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALB" + ], + [ + "MOVPRFX", + "SQDMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalb[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALB" + ], + [ + "MOVPRFX", + "SQDMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalb[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALB" + ], + [ + "MOVPRFX", + "SQDMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalb_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALB" + ], + [ + "MOVPRFX", + "SQDMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalb_lane[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "svint32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALB" + ], + [ + "MOVPRFX", + "SQDMLALB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalbt[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALBT" + ], + [ + "MOVPRFX", + "SQDMLALBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalbt[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALBT" + ], + [ + "MOVPRFX", + "SQDMLALBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalbt[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALBT" + ], + [ + "MOVPRFX", + "SQDMLALBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalbt[_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALBT" + ], + [ + "MOVPRFX", + "SQDMLALBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalbt[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALBT" + ], + [ + "MOVPRFX", + "SQDMLALBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalbt[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALBT" + ], + [ + "MOVPRFX", + "SQDMLALBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalt[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALT" + ], + [ + "MOVPRFX", + "SQDMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalt[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALT" + ], + [ + "MOVPRFX", + "SQDMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalt[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALT" + ], + [ + "MOVPRFX", + "SQDMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalt[_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALT" + ], + [ + "MOVPRFX", + "SQDMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalt[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALT" + ], + [ + "MOVPRFX", + "SQDMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalt[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALT" + ], + [ + "MOVPRFX", + "SQDMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalt_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALT" + ], + [ + "MOVPRFX", + "SQDMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlalt_lane[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "svint32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLALT" + ], + [ + "MOVPRFX", + "SQDMLALT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslb[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLB" + ], + [ + "MOVPRFX", + "SQDMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslb[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLB" + ], + [ + "MOVPRFX", + "SQDMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslb[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLB" + ], + [ + "MOVPRFX", + "SQDMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslb[_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLB" + ], + [ + "MOVPRFX", + "SQDMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslb[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLB" + ], + [ + "MOVPRFX", + "SQDMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslb[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLB" + ], + [ + "MOVPRFX", + "SQDMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslb_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLB" + ], + [ + "MOVPRFX", + "SQDMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslb_lane[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "svint32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLB" + ], + [ + "MOVPRFX", + "SQDMLSLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslbt[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLBT" + ], + [ + "MOVPRFX", + "SQDMLSLBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslbt[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLBT" + ], + [ + "MOVPRFX", + "SQDMLSLBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslbt[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLBT" + ], + [ + "MOVPRFX", + "SQDMLSLBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslbt[_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLBT" + ], + [ + "MOVPRFX", + "SQDMLSLBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslbt[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLBT" + ], + [ + "MOVPRFX", + "SQDMLSLBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslbt[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLBT" + ], + [ + "MOVPRFX", + "SQDMLSLBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslt[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLT" + ], + [ + "MOVPRFX", + "SQDMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslt[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLT" + ], + [ + "MOVPRFX", + "SQDMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslt[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLT" + ], + [ + "MOVPRFX", + "SQDMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslt[_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLT" + ], + [ + "MOVPRFX", + "SQDMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslt[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLT" + ], + [ + "MOVPRFX", + "SQDMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslt[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLT" + ], + [ + "MOVPRFX", + "SQDMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslt_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLT" + ], + [ + "MOVPRFX", + "SQDMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmlslt_lane[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2", + "svint32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMLSLT" + ], + [ + "MOVPRFX", + "SQDMLSLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmulh[_n_s16]", + "arguments": [ + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmulh[_n_s32]", + "arguments": [ + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmulh[_n_s64]", + "arguments": [ + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmulh[_n_s8]", + "arguments": [ + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmulh[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmulh[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmulh[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmulh[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmulh_lane[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmulh_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmulh_lane[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmullb[_n_s16]", + "arguments": [ + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmullb[_n_s32]", + "arguments": [ + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmullb[_n_s64]", + "arguments": [ + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmullb[_s16]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmullb[_s32]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmullb[_s64]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmullb_lane[_s32]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmullb_lane[_s64]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmullt[_n_s16]", + "arguments": [ + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmullt[_n_s32]", + "arguments": [ + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmullt[_n_s64]", + "arguments": [ + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmullt[_s16]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmullt[_s32]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmullt[_s64]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmullt_lane[_s32]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqdmullt_lane[_s64]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQDMULLT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincb[_n_s32]", + "arguments": [ + "int32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincb[_n_s64]", + "arguments": [ + "int64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincb[_n_u32]", + "arguments": [ + "uint32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincb[_n_u64]", + "arguments": [ + "uint64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincb_pat[_n_s32]", + "arguments": [ + "int32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincb_pat[_n_s64]", + "arguments": [ + "int64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincb_pat[_n_u32]", + "arguments": [ + "uint32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincb_pat[_n_u64]", + "arguments": [ + "uint64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincd[_n_s32]", + "arguments": [ + "int32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincd[_n_s64]", + "arguments": [ + "int64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincd[_n_u32]", + "arguments": [ + "uint32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincd[_n_u64]", + "arguments": [ + "uint64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincd[_s64]", + "arguments": [ + "svint64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.D|Ztied.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCD" + ], + [ + "MOVPRFX", + "SQINCD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincd[_u64]", + "arguments": [ + "svuint64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.D|Ztied.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCD" + ], + [ + "MOVPRFX", + "UQINCD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincd_pat[_n_s32]", + "arguments": [ + "int32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincd_pat[_n_s64]", + "arguments": [ + "int64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincd_pat[_n_u32]", + "arguments": [ + "uint32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincd_pat[_n_u64]", + "arguments": [ + "uint64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincd_pat[_s64]", + "arguments": [ + "svint64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.D|Ztied.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCD" + ], + [ + "MOVPRFX", + "SQINCD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincd_pat[_u64]", + "arguments": [ + "svuint64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.D|Ztied.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCD" + ], + [ + "MOVPRFX", + "UQINCD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqinch[_n_s32]", + "arguments": [ + "int32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqinch[_n_s64]", + "arguments": [ + "int64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqinch[_n_u32]", + "arguments": [ + "uint32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqinch[_n_u64]", + "arguments": [ + "uint64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqinch[_s16]", + "arguments": [ + "svint16_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.H|Ztied.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCH" + ], + [ + "MOVPRFX", + "SQINCH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqinch[_u16]", + "arguments": [ + "svuint16_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.H|Ztied.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCH" + ], + [ + "MOVPRFX", + "UQINCH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqinch_pat[_n_s32]", + "arguments": [ + "int32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqinch_pat[_n_s64]", + "arguments": [ + "int64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqinch_pat[_n_u32]", + "arguments": [ + "uint32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqinch_pat[_n_u64]", + "arguments": [ + "uint64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqinch_pat[_s16]", + "arguments": [ + "svint16_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.H|Ztied.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCH" + ], + [ + "MOVPRFX", + "SQINCH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqinch_pat[_u16]", + "arguments": [ + "svuint16_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.H|Ztied.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCH" + ], + [ + "MOVPRFX", + "UQINCH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_n_s32]_b16", + "arguments": [ + "int32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Wtied" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_n_s32]_b32", + "arguments": [ + "int32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Wtied" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_n_s32]_b64", + "arguments": [ + "int32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Wtied" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_n_s32]_b8", + "arguments": [ + "int32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Wtied" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_n_s64]_b16", + "arguments": [ + "int64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Xtied" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_n_s64]_b32", + "arguments": [ + "int64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Xtied" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_n_s64]_b64", + "arguments": [ + "int64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Xtied" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_n_s64]_b8", + "arguments": [ + "int64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Xtied" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_n_u32]_b16", + "arguments": [ + "uint32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Wtied" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_n_u32]_b32", + "arguments": [ + "uint32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Wtied" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_n_u32]_b64", + "arguments": [ + "uint32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Wtied" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_n_u32]_b8", + "arguments": [ + "uint32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Wtied" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_n_u64]_b16", + "arguments": [ + "uint64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Xtied" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_n_u64]_b32", + "arguments": [ + "uint64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Xtied" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_n_u64]_b64", + "arguments": [ + "uint64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Xtied" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_n_u64]_b8", + "arguments": [ + "uint64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Xtied" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_s16]", + "arguments": [ + "svint16_t op", + "svbool_t pg" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCP" + ], + [ + "MOVPRFX", + "SQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_s32]", + "arguments": [ + "svint32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCP" + ], + [ + "MOVPRFX", + "SQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_s64]", + "arguments": [ + "svint64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCP" + ], + [ + "MOVPRFX", + "SQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_u16]", + "arguments": [ + "svuint16_t op", + "svbool_t pg" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCP" + ], + [ + "MOVPRFX", + "UQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_u32]", + "arguments": [ + "svuint32_t op", + "svbool_t pg" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCP" + ], + [ + "MOVPRFX", + "UQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincp[_u64]", + "arguments": [ + "svuint64_t op", + "svbool_t pg" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCP" + ], + [ + "MOVPRFX", + "UQINCP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincw[_n_s32]", + "arguments": [ + "int32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincw[_n_s64]", + "arguments": [ + "int64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincw[_n_u32]", + "arguments": [ + "uint32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincw[_n_u64]", + "arguments": [ + "uint64_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincw[_s32]", + "arguments": [ + "svint32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.S|Ztied.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCW" + ], + [ + "MOVPRFX", + "SQINCW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincw[_u32]", + "arguments": [ + "svuint32_t op", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.S|Ztied.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCW" + ], + [ + "MOVPRFX", + "UQINCW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincw_pat[_n_s32]", + "arguments": [ + "int32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincw_pat[_n_s64]", + "arguments": [ + "int64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincw_pat[_n_u32]", + "arguments": [ + "uint32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Wtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincw_pat[_n_u64]", + "arguments": [ + "uint64_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Xtied" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincw_pat[_s32]", + "arguments": [ + "svint32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.S|Ztied.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQINCW" + ], + [ + "MOVPRFX", + "SQINCW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqincw_pat[_u32]", + "arguments": [ + "svuint32_t op", + "enum svpattern pattern", + "uint64_t imm_factor" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm_factor": { + "minimum": 1, + "maximum": 16 + }, + "op": { + "register": "Zop.S|Ztied.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQINCW" + ], + [ + "MOVPRFX", + "UQINCW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqneg[_s16]_m", + "arguments": [ + "svint16_t inactive", + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQNEG" + ], + [ + "MOVPRFX", + "SQNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqneg[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQNEG" + ], + [ + "MOVPRFX", + "SQNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqneg[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqneg[_s32]_m", + "arguments": [ + "svint32_t inactive", + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQNEG" + ], + [ + "MOVPRFX", + "SQNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqneg[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQNEG" + ], + [ + "MOVPRFX", + "SQNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqneg[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqneg[_s64]_m", + "arguments": [ + "svint64_t inactive", + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQNEG" + ], + [ + "MOVPRFX", + "SQNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqneg[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQNEG" + ], + [ + "MOVPRFX", + "SQNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqneg[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqneg[_s8]_m", + "arguments": [ + "svint8_t inactive", + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.B|Ztied.B" + }, + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQNEG" + ], + [ + "MOVPRFX", + "SQNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqneg[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B|Ztied.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQNEG" + ], + [ + "MOVPRFX", + "SQNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqneg[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQNEG" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdcmlah[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDCMLAH" + ], + [ + "MOVPRFX", + "SQRDCMLAH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdcmlah[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "svint32_t op3", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDCMLAH" + ], + [ + "MOVPRFX", + "SQRDCMLAH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdcmlah[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "svint64_t op3", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDCMLAH" + ], + [ + "MOVPRFX", + "SQRDCMLAH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdcmlah[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "svint8_t op3", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDCMLAH" + ], + [ + "MOVPRFX", + "SQRDCMLAH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdcmlah_lane[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_index", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDCMLAH" + ], + [ + "MOVPRFX", + "SQRDCMLAH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdcmlah_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "svint32_t op3", + "uint64_t imm_index", + "uint64_t imm_rotation" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDCMLAH" + ], + [ + "MOVPRFX", + "SQRDCMLAH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlah[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLAH" + ], + [ + "MOVPRFX", + "SQRDMLAH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlah[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLAH" + ], + [ + "MOVPRFX", + "SQRDMLAH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlah[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "int64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLAH" + ], + [ + "MOVPRFX", + "SQRDMLAH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlah[_n_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLAH" + ], + [ + "MOVPRFX", + "SQRDMLAH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlah[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLAH" + ], + [ + "MOVPRFX", + "SQRDMLAH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlah[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLAH" + ], + [ + "MOVPRFX", + "SQRDMLAH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlah[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLAH" + ], + [ + "MOVPRFX", + "SQRDMLAH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlah[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLAH" + ], + [ + "MOVPRFX", + "SQRDMLAH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlah_lane[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLAH" + ], + [ + "MOVPRFX", + "SQRDMLAH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlah_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "svint32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLAH" + ], + [ + "MOVPRFX", + "SQRDMLAH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlah_lane[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "svint64_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLAH" + ], + [ + "MOVPRFX", + "SQRDMLAH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlsh[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "int16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLSH" + ], + [ + "MOVPRFX", + "SQRDMLSH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlsh[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "int32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLSH" + ], + [ + "MOVPRFX", + "SQRDMLSH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlsh[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "int64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLSH" + ], + [ + "MOVPRFX", + "SQRDMLSH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlsh[_n_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLSH" + ], + [ + "MOVPRFX", + "SQRDMLSH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlsh[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "svint16_t op3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLSH" + ], + [ + "MOVPRFX", + "SQRDMLSH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlsh[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "svint32_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLSH" + ], + [ + "MOVPRFX", + "SQRDMLSH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlsh[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "svint64_t op3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLSH" + ], + [ + "MOVPRFX", + "SQRDMLSH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlsh[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLSH" + ], + [ + "MOVPRFX", + "SQRDMLSH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlsh_lane[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "svint16_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "op3": { + "register": "Zop3.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLSH" + ], + [ + "MOVPRFX", + "SQRDMLSH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlsh_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "svint32_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLSH" + ], + [ + "MOVPRFX", + "SQRDMLSH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmlsh_lane[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "svint64_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMLSH" + ], + [ + "MOVPRFX", + "SQRDMLSH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmulh[_n_s16]", + "arguments": [ + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmulh[_n_s32]", + "arguments": [ + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmulh[_n_s64]", + "arguments": [ + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmulh[_n_s8]", + "arguments": [ + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmulh[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmulh[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmulh[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmulh[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmulh_lane[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmulh_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrdmulh_lane[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 1 + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRDMULH" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "SRSHR" + ], + [ + "SQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "SRSHR" + ], + [ + "SQRSHL" + ], + [ + "SQRSHLR" + ], + [ + "MOVPRFX", + "SQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSHL" + ], + [ + "MOVPRFX", + "SRSHR" + ], + [ + "MOVPRFX", + "SQRSHL" + ], + [ + "MOVPRFX", + "SQRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "SRSHR" + ], + [ + "SQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "SRSHR" + ], + [ + "SQRSHL" + ], + [ + "SQRSHLR" + ], + [ + "MOVPRFX", + "SQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSHL" + ], + [ + "MOVPRFX", + "SRSHR" + ], + [ + "MOVPRFX", + "SQRSHL" + ], + [ + "MOVPRFX", + "SQRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "SRSHR" + ], + [ + "SQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "SRSHR" + ], + [ + "SQRSHL" + ], + [ + "SQRSHLR" + ], + [ + "MOVPRFX", + "SQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSHL" + ], + [ + "MOVPRFX", + "SRSHR" + ], + [ + "MOVPRFX", + "SQRSHL" + ], + [ + "MOVPRFX", + "SQRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "SRSHR" + ], + [ + "SQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "SRSHR" + ], + [ + "SQRSHL" + ], + [ + "SQRSHLR" + ], + [ + "MOVPRFX", + "SQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSHL" + ], + [ + "MOVPRFX", + "SRSHR" + ], + [ + "MOVPRFX", + "SQRSHL" + ], + [ + "MOVPRFX", + "SQRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "URSHR" + ], + [ + "UQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "URSHR" + ], + [ + "UQRSHL" + ], + [ + "UQRSHLR" + ], + [ + "MOVPRFX", + "UQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSHL" + ], + [ + "MOVPRFX", + "URSHR" + ], + [ + "MOVPRFX", + "UQRSHL" + ], + [ + "MOVPRFX", + "UQRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "URSHR" + ], + [ + "UQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "URSHR" + ], + [ + "UQRSHL" + ], + [ + "UQRSHLR" + ], + [ + "MOVPRFX", + "UQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSHL" + ], + [ + "MOVPRFX", + "URSHR" + ], + [ + "MOVPRFX", + "UQRSHL" + ], + [ + "MOVPRFX", + "UQRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "URSHR" + ], + [ + "UQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "URSHR" + ], + [ + "UQRSHL" + ], + [ + "UQRSHLR" + ], + [ + "MOVPRFX", + "UQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSHL" + ], + [ + "MOVPRFX", + "URSHR" + ], + [ + "MOVPRFX", + "UQRSHL" + ], + [ + "MOVPRFX", + "UQRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "URSHR" + ], + [ + "UQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "URSHR" + ], + [ + "UQRSHL" + ], + [ + "UQRSHLR" + ], + [ + "MOVPRFX", + "UQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSHL" + ], + [ + "MOVPRFX", + "URSHR" + ], + [ + "MOVPRFX", + "UQRSHL" + ], + [ + "MOVPRFX", + "UQRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHL" + ], + [ + "MOVPRFX", + "SQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHL" + ], + [ + "SQRSHLR" + ], + [ + "MOVPRFX", + "SQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQRSHL" + ], + [ + "MOVPRFX", + "SQRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHL" + ], + [ + "MOVPRFX", + "SQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHL" + ], + [ + "SQRSHLR" + ], + [ + "MOVPRFX", + "SQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQRSHL" + ], + [ + "MOVPRFX", + "SQRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHL" + ], + [ + "MOVPRFX", + "SQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHL" + ], + [ + "SQRSHLR" + ], + [ + "MOVPRFX", + "SQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQRSHL" + ], + [ + "MOVPRFX", + "SQRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHL" + ], + [ + "MOVPRFX", + "SQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHL" + ], + [ + "SQRSHLR" + ], + [ + "MOVPRFX", + "SQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQRSHL" + ], + [ + "MOVPRFX", + "SQRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQRSHL" + ], + [ + "MOVPRFX", + "UQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQRSHL" + ], + [ + "UQRSHLR" + ], + [ + "MOVPRFX", + "UQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQRSHL" + ], + [ + "MOVPRFX", + "UQRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQRSHL" + ], + [ + "MOVPRFX", + "UQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQRSHL" + ], + [ + "UQRSHLR" + ], + [ + "MOVPRFX", + "UQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQRSHL" + ], + [ + "MOVPRFX", + "UQRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQRSHL" + ], + [ + "MOVPRFX", + "UQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQRSHL" + ], + [ + "UQRSHLR" + ], + [ + "MOVPRFX", + "UQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQRSHL" + ], + [ + "MOVPRFX", + "UQRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQRSHL" + ], + [ + "MOVPRFX", + "UQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQRSHL" + ], + [ + "UQRSHLR" + ], + [ + "MOVPRFX", + "UQRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshl[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQRSHL" + ], + [ + "MOVPRFX", + "UQRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrn[_n]_s16[_s32_x2]", + "arguments": [ + "svint32x2_t zn", + "uint64_t imm" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm": { + "immediate": "imm1" + }, + "zn": { + "Z multi-vector": "{ Zreg2.S, Zreg3.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHRN" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrn[_n]_u16[_u32_x2]", + "arguments": [ + "svuint32x2_t zn", + "uint64_t imm" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm": { + "immediate": "imm1" + }, + "zn": { + "Z multi-vector": "{ Zreg2.S, Zreg3.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQRSHRN" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrnb[_n_s16]", + "arguments": [ + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrnb[_n_s32]", + "arguments": [ + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrnb[_n_s64]", + "arguments": [ + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrnb[_n_u16]", + "arguments": [ + "svuint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQRSHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrnb[_n_u32]", + "arguments": [ + "svuint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQRSHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrnb[_n_u64]", + "arguments": [ + "svuint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQRSHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrnt[_n_s16]", + "arguments": [ + "svint8_t even", + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrnt[_n_s32]", + "arguments": [ + "svint16_t even", + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrnt[_n_s64]", + "arguments": [ + "svint32_t even", + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrnt[_n_u16]", + "arguments": [ + "svuint8_t even", + "svuint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQRSHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrnt[_n_u32]", + "arguments": [ + "svuint16_t even", + "svuint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQRSHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrnt[_n_u64]", + "arguments": [ + "svuint32_t even", + "svuint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQRSHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrun[_n]_u16[_s32_x2]", + "arguments": [ + "svint32x2_t zn", + "uint64_t imm" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm": { + "immediate": "imm1" + }, + "zn": { + "Z multi-vector": "{ Zreg2.S, Zreg3.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHRUN" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrunb[_n_s16]", + "arguments": [ + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHRUNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrunb[_n_s32]", + "arguments": [ + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHRUNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrunb[_n_s64]", + "arguments": [ + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHRUNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrunt[_n_s16]", + "arguments": [ + "svuint8_t even", + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHRUNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrunt[_n_s32]", + "arguments": [ + "svuint16_t even", + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHRUNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqrshrunt[_n_s64]", + "arguments": [ + "svuint32_t even", + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQRSHRUNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "ASR" + ], + [ + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "ASR" + ], + [ + "SQSHL" + ], + [ + "SQSHLR" + ], + [ + "MOVPRFX", + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSHL" + ], + [ + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", + "SQSHL" + ], + [ + "MOVPRFX", + "SQSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "ASR" + ], + [ + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "ASR" + ], + [ + "SQSHL" + ], + [ + "SQSHLR" + ], + [ + "MOVPRFX", + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSHL" + ], + [ + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", + "SQSHL" + ], + [ + "MOVPRFX", + "SQSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "ASR" + ], + [ + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "ASR" + ], + [ + "SQSHL" + ], + [ + "SQSHLR" + ], + [ + "MOVPRFX", + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSHL" + ], + [ + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", + "SQSHL" + ], + [ + "MOVPRFX", + "SQSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "ASR" + ], + [ + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "ASR" + ], + [ + "SQSHL" + ], + [ + "SQSHLR" + ], + [ + "MOVPRFX", + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSHL" + ], + [ + "MOVPRFX", + "ASR" + ], + [ + "MOVPRFX", + "SQSHL" + ], + [ + "MOVPRFX", + "SQSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "LSR" + ], + [ + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "LSR" + ], + [ + "UQSHL" + ], + [ + "UQSHLR" + ], + [ + "MOVPRFX", + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSHL" + ], + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "UQSHL" + ], + [ + "MOVPRFX", + "UQSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "LSR" + ], + [ + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "LSR" + ], + [ + "UQSHL" + ], + [ + "UQSHLR" + ], + [ + "MOVPRFX", + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSHL" + ], + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "UQSHL" + ], + [ + "MOVPRFX", + "UQSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "LSR" + ], + [ + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "LSR" + ], + [ + "UQSHL" + ], + [ + "UQSHLR" + ], + [ + "MOVPRFX", + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSHL" + ], + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "UQSHL" + ], + [ + "MOVPRFX", + "UQSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "LSR" + ], + [ + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "LSR" + ], + [ + "UQSHL" + ], + [ + "UQSHLR" + ], + [ + "MOVPRFX", + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSHL" + ], + [ + "MOVPRFX", + "LSR" + ], + [ + "MOVPRFX", + "UQSHL" + ], + [ + "MOVPRFX", + "UQSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "MOVPRFX", + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "SQSHLR" + ], + [ + "MOVPRFX", + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSHL" + ], + [ + "MOVPRFX", + "SQSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "MOVPRFX", + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "SQSHLR" + ], + [ + "MOVPRFX", + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSHL" + ], + [ + "MOVPRFX", + "SQSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "MOVPRFX", + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "SQSHLR" + ], + [ + "MOVPRFX", + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSHL" + ], + [ + "MOVPRFX", + "SQSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "MOVPRFX", + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHL" + ], + [ + "SQSHLR" + ], + [ + "MOVPRFX", + "SQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSHL" + ], + [ + "MOVPRFX", + "SQSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "MOVPRFX", + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "UQSHLR" + ], + [ + "MOVPRFX", + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSHL" + ], + [ + "MOVPRFX", + "UQSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "MOVPRFX", + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "UQSHLR" + ], + [ + "MOVPRFX", + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSHL" + ], + [ + "MOVPRFX", + "UQSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "MOVPRFX", + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "UQSHLR" + ], + [ + "MOVPRFX", + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSHL" + ], + [ + "MOVPRFX", + "UQSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "MOVPRFX", + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHL" + ], + [ + "UQSHLR" + ], + [ + "MOVPRFX", + "UQSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshl[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSHL" + ], + [ + "MOVPRFX", + "UQSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshlu[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 15 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHLU" + ], + [ + "MOVPRFX", + "SQSHLU" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshlu[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 15 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHLU" + ], + [ + "MOVPRFX", + "SQSHLU" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshlu[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 15 + }, + "op1": { + "register": "Zop1.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSHLU" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshlu[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 31 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHLU" + ], + [ + "MOVPRFX", + "SQSHLU" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshlu[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 31 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHLU" + ], + [ + "MOVPRFX", + "SQSHLU" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshlu[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 31 + }, + "op1": { + "register": "Zop1.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSHLU" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshlu[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 63 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHLU" + ], + [ + "MOVPRFX", + "SQSHLU" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshlu[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 63 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHLU" + ], + [ + "MOVPRFX", + "SQSHLU" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshlu[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 63 + }, + "op1": { + "register": "Zop1.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSHLU" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshlu[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHLU" + ], + [ + "MOVPRFX", + "SQSHLU" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshlu[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHLU" + ], + [ + "MOVPRFX", + "SQSHLU" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshlu[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSHLU" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshrnb[_n_s16]", + "arguments": [ + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshrnb[_n_s32]", + "arguments": [ + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshrnb[_n_s64]", + "arguments": [ + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshrnb[_n_u16]", + "arguments": [ + "svuint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshrnb[_n_u32]", + "arguments": [ + "svuint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshrnb[_n_u64]", + "arguments": [ + "svuint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshrnt[_n_s16]", + "arguments": [ + "svint8_t even", + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshrnt[_n_s32]", + "arguments": [ + "svint16_t even", + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshrnt[_n_s64]", + "arguments": [ + "svint32_t even", + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshrnt[_n_u16]", + "arguments": [ + "svuint8_t even", + "svuint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshrnt[_n_u32]", + "arguments": [ + "svuint16_t even", + "svuint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshrnt[_n_u64]", + "arguments": [ + "svuint32_t even", + "svuint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshrunb[_n_s16]", + "arguments": [ + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHRUNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshrunb[_n_s32]", + "arguments": [ + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHRUNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshrunb[_n_s64]", + "arguments": [ + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHRUNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshrunt[_n_s16]", + "arguments": [ + "svuint8_t even", + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHRUNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshrunt[_n_s32]", + "arguments": [ + "svuint16_t even", + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHRUNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqshrunt[_n_s64]", + "arguments": [ + "svuint32_t even", + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSHRUNT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqsub[_n_s16]", + "arguments": [ + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "SQADD" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "MOVPRFX", + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "SQADD" + ], + [ + "SQSUB" + ], + [ + "SQSUBR" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSUB" + ], + [ + "MOVPRFX", + "SQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqsub[_n_s32]", + "arguments": [ + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "SQADD" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "MOVPRFX", + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "SQADD" + ], + [ + "SQSUB" + ], + [ + "SQSUBR" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSUB" + ], + [ + "MOVPRFX", + "SQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqsub[_n_s64]", + "arguments": [ + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "SQADD" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "MOVPRFX", + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "SQADD" + ], + [ + "SQSUB" + ], + [ + "SQSUBR" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSUB" + ], + [ + "MOVPRFX", + "SQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqsub[_n_s8]", + "arguments": [ + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "SQADD" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "MOVPRFX", + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "SQADD" + ], + [ + "SQSUB" + ], + [ + "SQSUBR" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSUB" + ], + [ + "MOVPRFX", + "SQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqsub[_n_u16]", + "arguments": [ + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "MOVPRFX", + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "UQSUB" + ], + [ + "UQSUBR" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSUB" + ], + [ + "MOVPRFX", + "UQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqsub[_n_u32]", + "arguments": [ + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "MOVPRFX", + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "UQSUB" + ], + [ + "UQSUBR" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSUB" + ], + [ + "MOVPRFX", + "UQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqsub[_n_u64]", + "arguments": [ + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "MOVPRFX", + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "UQSUB" + ], + [ + "UQSUBR" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSUB" + ], + [ + "MOVPRFX", + "UQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqsub[_n_u8]", + "arguments": [ + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "MOVPRFX", + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "UQSUB" + ], + [ + "UQSUBR" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSUB" + ], + [ + "MOVPRFX", + "UQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqsub[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "MOVPRFX", + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "SQSUBR" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSUB" + ], + [ + "MOVPRFX", + "SQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqsub[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "MOVPRFX", + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "SQSUBR" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSUB" + ], + [ + "MOVPRFX", + "SQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqsub[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "MOVPRFX", + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "SQSUBR" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSUB" + ], + [ + "MOVPRFX", + "SQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqsub[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "MOVPRFX", + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUB" + ], + [ + "SQSUBR" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSUB" + ], + [ + "MOVPRFX", + "SQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqsub[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "MOVPRFX", + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "UQSUBR" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSUB" + ], + [ + "MOVPRFX", + "UQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqsub[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "MOVPRFX", + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "UQSUBR" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSUB" + ], + [ + "MOVPRFX", + "UQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqsub[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "MOVPRFX", + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "UQSUBR" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSUB" + ], + [ + "MOVPRFX", + "UQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svqsub[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "MOVPRFX", + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUB" + ], + [ + "UQSUBR" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsub[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSUB" + ], + [ + "MOVPRFX", + "UQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUBR" + ], + [ + "MOVPRFX", + "SQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUBR" + ], + [ + "SQSUB" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSUBR" + ], + [ + "MOVPRFX", + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUBR" + ], + [ + "MOVPRFX", + "SQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUBR" + ], + [ + "SQSUB" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSUBR" + ], + [ + "MOVPRFX", + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUBR" + ], + [ + "MOVPRFX", + "SQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUBR" + ], + [ + "SQSUB" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSUBR" + ], + [ + "MOVPRFX", + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUBR" + ], + [ + "MOVPRFX", + "SQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUBR" + ], + [ + "SQSUB" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSUBR" + ], + [ + "MOVPRFX", + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUBR" + ], + [ + "MOVPRFX", + "UQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUBR" + ], + [ + "UQSUB" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSUBR" + ], + [ + "MOVPRFX", + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUBR" + ], + [ + "MOVPRFX", + "UQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUBR" + ], + [ + "UQSUB" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSUBR" + ], + [ + "MOVPRFX", + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUBR" + ], + [ + "MOVPRFX", + "UQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUBR" + ], + [ + "UQSUB" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSUBR" + ], + [ + "MOVPRFX", + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUBR" + ], + [ + "MOVPRFX", + "UQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUBR" + ], + [ + "UQSUB" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSUBR" + ], + [ + "MOVPRFX", + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUBR" + ], + [ + "MOVPRFX", + "SQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUBR" + ], + [ + "SQSUB" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSUBR" + ], + [ + "MOVPRFX", + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUBR" + ], + [ + "MOVPRFX", + "SQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUBR" + ], + [ + "SQSUB" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSUBR" + ], + [ + "MOVPRFX", + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUBR" + ], + [ + "MOVPRFX", + "SQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUBR" + ], + [ + "SQSUB" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSUBR" + ], + [ + "MOVPRFX", + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUBR" + ], + [ + "MOVPRFX", + "SQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQSUBR" + ], + [ + "SQSUB" + ], + [ + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SQSUBR" + ], + [ + "MOVPRFX", + "SQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUBR" + ], + [ + "MOVPRFX", + "UQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUBR" + ], + [ + "UQSUB" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSUBR" + ], + [ + "MOVPRFX", + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUBR" + ], + [ + "MOVPRFX", + "UQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUBR" + ], + [ + "UQSUB" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSUBR" + ], + [ + "MOVPRFX", + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUBR" + ], + [ + "MOVPRFX", + "UQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUBR" + ], + [ + "UQSUB" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSUBR" + ], + [ + "MOVPRFX", + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUBR" + ], + [ + "MOVPRFX", + "UQSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQSUBR" + ], + [ + "UQSUB" + ], + [ + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqsubr[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "UQSUBR" + ], + [ + "MOVPRFX", + "UQSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqxtnb[_s16]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQXTNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqxtnb[_s32]", + "arguments": [ + "svint32_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQXTNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqxtnb[_s64]", + "arguments": [ + "svint64_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQXTNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqxtnb[_u16]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQXTNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqxtnb[_u32]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQXTNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqxtnb[_u64]", + "arguments": [ + "svuint64_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQXTNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqxtnt[_s16]", + "arguments": [ + "svint8_t even", + "svint16_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQXTNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqxtnt[_s32]", + "arguments": [ + "svint16_t even", + "svint32_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "op": { + "register": "Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQXTNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqxtnt[_s64]", + "arguments": [ + "svint32_t even", + "svint64_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op": { + "register": "Zop.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQXTNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqxtnt[_u16]", + "arguments": [ + "svuint8_t even", + "svuint16_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQXTNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqxtnt[_u32]", + "arguments": [ + "svuint16_t even", + "svuint32_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "op": { + "register": "Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQXTNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqxtnt[_u64]", + "arguments": [ + "svuint32_t even", + "svuint64_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op": { + "register": "Zop.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQXTNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqxtunb[_s16]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQXTUNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqxtunb[_s32]", + "arguments": [ + "svint32_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQXTUNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqxtunb[_s64]", + "arguments": [ + "svint64_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQXTUNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqxtunt[_s16]", + "arguments": [ + "svuint8_t even", + "svint16_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQXTUNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqxtunt[_s32]", + "arguments": [ + "svuint16_t even", + "svint32_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "op": { + "register": "Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQXTUNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svqxtunt[_s64]", + "arguments": [ + "svuint32_t even", + "svint64_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op": { + "register": "Zop.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQXTUNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnb[_n_s16]", + "arguments": [ + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnb[_n_s32]", + "arguments": [ + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnb[_n_s64]", + "arguments": [ + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnb[_n_u16]", + "arguments": [ + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnb[_n_u32]", + "arguments": [ + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnb[_n_u64]", + "arguments": [ + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnb[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnb[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnb[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnb[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnb[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnb[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnt[_n_s16]", + "arguments": [ + "svint8_t even", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnt[_n_s32]", + "arguments": [ + "svint16_t even", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnt[_n_s64]", + "arguments": [ + "svint32_t even", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnt[_n_u16]", + "arguments": [ + "svuint8_t even", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnt[_n_u32]", + "arguments": [ + "svuint16_t even", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnt[_n_u64]", + "arguments": [ + "svuint32_t even", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnt[_s16]", + "arguments": [ + "svint8_t even", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnt[_s32]", + "arguments": [ + "svint16_t even", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnt[_s64]", + "arguments": [ + "svint32_t even", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnt[_u16]", + "arguments": [ + "svuint8_t even", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnt[_u32]", + "arguments": [ + "svuint16_t even", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svraddhnt[_u64]", + "arguments": [ + "svuint32_t even", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RADDHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrax1[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RAX1" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrax1[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RAX1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_s16]_m", + "arguments": [ + "svint16_t inactive", + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RBIT" + ], + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RBIT" + ], + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_s32]_m", + "arguments": [ + "svint32_t inactive", + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RBIT" + ], + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RBIT" + ], + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_s64]_m", + "arguments": [ + "svint64_t inactive", + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RBIT" + ], + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RBIT" + ], + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_s8]_m", + "arguments": [ + "svint8_t inactive", + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.B|Ztied.B" + }, + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RBIT" + ], + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B|Ztied.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RBIT" + ], + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_u16]_m", + "arguments": [ + "svuint16_t inactive", + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RBIT" + ], + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RBIT" + ], + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_u32]_m", + "arguments": [ + "svuint32_t inactive", + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RBIT" + ], + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RBIT" + ], + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_u64]_m", + "arguments": [ + "svuint64_t inactive", + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RBIT" + ], + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RBIT" + ], + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_u8]_m", + "arguments": [ + "svuint8_t inactive", + "svbool_t pg", + "svuint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.B|Ztied.B" + }, + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RBIT" + ], + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B|Ztied.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RBIT" + ], + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrbit[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "RBIT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrdffr", + "arguments": [], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDFFR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrdffr_z", + "arguments": [ + "svbool_t pg" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDFFR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrecpe[_f16]", + "arguments": [ + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRECPE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrecpe[_f32]", + "arguments": [ + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRECPE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrecpe[_f64]", + "arguments": [ + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRECPE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrecpe[_u32]_m", + "arguments": [ + "svuint32_t inactive", + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URECPE" + ], + [ + "MOVPRFX", + "URECPE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrecpe[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URECPE" + ], + [ + "MOVPRFX", + "URECPE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrecpe[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "URECPE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrecps[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRECPS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrecps[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRECPS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrecps[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRECPS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrecpx[_f16]_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRECPX" + ], + [ + "MOVPRFX", + "FRECPX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrecpx[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRECPX" + ], + [ + "MOVPRFX", + "FRECPX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrecpx[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRECPX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrecpx[_f32]_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRECPX" + ], + [ + "MOVPRFX", + "FRECPX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrecpx[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRECPX" + ], + [ + "MOVPRFX", + "FRECPX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrecpx[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRECPX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrecpx[_f64]_m", + "arguments": [ + "svfloat64_t inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRECPX" + ], + [ + "MOVPRFX", + "FRECPX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrecpx[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRECPX" + ], + [ + "MOVPRFX", + "FRECPX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrecpx[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRECPX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svreinterpret[_b]", + "arguments": [ + "svcount_t count" + ], + "return_type": { + "value": "svbool_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svreinterpret[_c]", + "arguments": [ + "svbool_t pg" + ], + "return_type": { + "value": "svcount_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f16[_f16]", + "arguments": [ + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f16[_f32]", + "arguments": [ + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f16[_f64]", + "arguments": [ + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f16[_s16]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f16[_s32]", + "arguments": [ + "svint32_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f16[_s64]", + "arguments": [ + "svint64_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f16[_s8]", + "arguments": [ + "svint8_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f16[_u16]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f16[_u32]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f16[_u64]", + "arguments": [ + "svuint64_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f16[_u8]", + "arguments": [ + "svuint8_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f32[_f16]", + "arguments": [ + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f32[_f32]", + "arguments": [ + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f32[_f64]", + "arguments": [ + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f32[_s16]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f32[_s32]", + "arguments": [ + "svint32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f32[_s64]", + "arguments": [ + "svint64_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f32[_s8]", + "arguments": [ + "svint8_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f32[_u16]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f32[_u32]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f32[_u64]", + "arguments": [ + "svuint64_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f32[_u8]", + "arguments": [ + "svuint8_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f64[_f16]", + "arguments": [ + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f64[_f32]", + "arguments": [ + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f64[_f64]", + "arguments": [ + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f64[_s16]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f64[_s32]", + "arguments": [ + "svint32_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f64[_s64]", + "arguments": [ + "svint64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f64[_s8]", + "arguments": [ + "svint8_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f64[_u16]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f64[_u32]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f64[_u64]", + "arguments": [ + "svuint64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_f64[_u8]", + "arguments": [ + "svuint8_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s16[_f16]", + "arguments": [ + "svfloat16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s16[_f32]", + "arguments": [ + "svfloat32_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s16[_f64]", + "arguments": [ + "svfloat64_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s16[_s16]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s16[_s32]", + "arguments": [ + "svint32_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s16[_s64]", + "arguments": [ + "svint64_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s16[_s8]", + "arguments": [ + "svint8_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s16[_u16]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s16[_u32]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s16[_u64]", + "arguments": [ + "svuint64_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s16[_u8]", + "arguments": [ + "svuint8_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s32[_f16]", + "arguments": [ + "svfloat16_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s32[_f32]", + "arguments": [ + "svfloat32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s32[_f64]", + "arguments": [ + "svfloat64_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s32[_s16]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s32[_s32]", + "arguments": [ + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s32[_s64]", + "arguments": [ + "svint64_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s32[_s8]", + "arguments": [ + "svint8_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s32[_u16]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s32[_u32]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s32[_u64]", + "arguments": [ + "svuint64_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s32[_u8]", + "arguments": [ + "svuint8_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s64[_f16]", + "arguments": [ + "svfloat16_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s64[_f32]", + "arguments": [ + "svfloat32_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s64[_f64]", + "arguments": [ + "svfloat64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s64[_s16]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s64[_s32]", + "arguments": [ + "svint32_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s64[_s64]", + "arguments": [ + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s64[_s8]", + "arguments": [ + "svint8_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s64[_u16]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s64[_u32]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s64[_u64]", + "arguments": [ + "svuint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s64[_u8]", + "arguments": [ + "svuint8_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s8[_f16]", + "arguments": [ + "svfloat16_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s8[_f32]", + "arguments": [ + "svfloat32_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s8[_f64]", + "arguments": [ + "svfloat64_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s8[_s16]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s8[_s32]", + "arguments": [ + "svint32_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s8[_s64]", + "arguments": [ + "svint64_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s8[_s8]", + "arguments": [ + "svint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s8[_u16]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s8[_u32]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s8[_u64]", + "arguments": [ + "svuint64_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_s8[_u8]", + "arguments": [ + "svuint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u16[_f16]", + "arguments": [ + "svfloat16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u16[_f32]", + "arguments": [ + "svfloat32_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u16[_f64]", + "arguments": [ + "svfloat64_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u16[_s16]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u16[_s32]", + "arguments": [ + "svint32_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u16[_s64]", + "arguments": [ + "svint64_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u16[_s8]", + "arguments": [ + "svint8_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u16[_u16]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u16[_u32]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u16[_u64]", + "arguments": [ + "svuint64_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u16[_u8]", + "arguments": [ + "svuint8_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u32[_f16]", + "arguments": [ + "svfloat16_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u32[_f32]", + "arguments": [ + "svfloat32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u32[_f64]", + "arguments": [ + "svfloat64_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u32[_s16]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u32[_s32]", + "arguments": [ + "svint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u32[_s64]", + "arguments": [ + "svint64_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u32[_s8]", + "arguments": [ + "svint8_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u32[_u16]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u32[_u32]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u32[_u64]", + "arguments": [ + "svuint64_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u32[_u8]", + "arguments": [ + "svuint8_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u64[_f16]", + "arguments": [ + "svfloat16_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u64[_f32]", + "arguments": [ + "svfloat32_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u64[_f64]", + "arguments": [ + "svfloat64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u64[_s16]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u64[_s32]", + "arguments": [ + "svint32_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u64[_s64]", + "arguments": [ + "svint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u64[_s8]", + "arguments": [ + "svint8_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u64[_u16]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u64[_u32]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u64[_u64]", + "arguments": [ + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u64[_u8]", + "arguments": [ + "svuint8_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u8[_f16]", + "arguments": [ + "svfloat16_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u8[_f32]", + "arguments": [ + "svfloat32_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u8[_f64]", + "arguments": [ + "svfloat64_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u8[_s16]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u8[_s32]", + "arguments": [ + "svint32_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u8[_s64]", + "arguments": [ + "svint64_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u8[_s8]", + "arguments": [ + "svint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u8[_u16]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u8[_u32]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u8[_u64]", + "arguments": [ + "svuint64_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svreinterpret_u8[_u8]", + "arguments": [ + "svuint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrev[_f16]", + "arguments": [ + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrev[_f32]", + "arguments": [ + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrev[_f64]", + "arguments": [ + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrev[_s16]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrev[_s32]", + "arguments": [ + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrev[_s64]", + "arguments": [ + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrev[_s8]", + "arguments": [ + "svint8_t op" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrev[_u16]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrev[_u32]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrev[_u64]", + "arguments": [ + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrev[_u8]", + "arguments": [ + "svuint8_t op" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrev_b16", + "arguments": [ + "svbool_t op" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Pop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrev_b32", + "arguments": [ + "svbool_t op" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Pop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrev_b64", + "arguments": [ + "svbool_t op" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Pop.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrev_b8", + "arguments": [ + "svbool_t op" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Pop.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REV" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevb[_s16]_m", + "arguments": [ + "svint16_t inactive", + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVB" + ], + [ + "MOVPRFX", + "REVB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevb[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVB" + ], + [ + "MOVPRFX", + "REVB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevb[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "REVB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevb[_s32]_m", + "arguments": [ + "svint32_t inactive", + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVB" + ], + [ + "MOVPRFX", + "REVB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevb[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVB" + ], + [ + "MOVPRFX", + "REVB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevb[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "REVB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevb[_s64]_m", + "arguments": [ + "svint64_t inactive", + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVB" + ], + [ + "MOVPRFX", + "REVB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevb[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVB" + ], + [ + "MOVPRFX", + "REVB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevb[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "REVB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevb[_u16]_m", + "arguments": [ + "svuint16_t inactive", + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVB" + ], + [ + "MOVPRFX", + "REVB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevb[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVB" + ], + [ + "MOVPRFX", + "REVB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevb[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "REVB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevb[_u32]_m", + "arguments": [ + "svuint32_t inactive", + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVB" + ], + [ + "MOVPRFX", + "REVB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevb[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVB" + ], + [ + "MOVPRFX", + "REVB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevb[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "REVB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevb[_u64]_m", + "arguments": [ + "svuint64_t inactive", + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVB" + ], + [ + "MOVPRFX", + "REVB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevb[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVB" + ], + [ + "MOVPRFX", + "REVB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevb[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "REVB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_f16]_m", + "arguments": [ + "svfloat16_t zd", + "svbool_t pg", + "svfloat16_t zn" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zd": { + "register": "Zreg1.Q" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t zn" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t zn" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOV", + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_f32]_m", + "arguments": [ + "svfloat32_t zd", + "svbool_t pg", + "svfloat32_t zn" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zd": { + "register": "Zreg1.Q" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t zn" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t zn" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOV", + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_f64]_m", + "arguments": [ + "svfloat64_t zd", + "svbool_t pg", + "svfloat64_t zn" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zd": { + "register": "Zreg1.Q" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t zn" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t zn" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOV", + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_s16]_m", + "arguments": [ + "svint16_t zd", + "svbool_t pg", + "svint16_t zn" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zd": { + "register": "Zreg1.Q" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t zn" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOV", + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t zn" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_s32]_m", + "arguments": [ + "svint32_t zd", + "svbool_t pg", + "svint32_t zn" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zd": { + "register": "Zreg1.Q" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t zn" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOV", + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t zn" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_s64]_m", + "arguments": [ + "svint64_t zd", + "svbool_t pg", + "svint64_t zn" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zd": { + "register": "Zreg1.Q" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t zn" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t zn" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOV", + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_s8]_m", + "arguments": [ + "svint8_t zd", + "svbool_t pg", + "svint8_t zn" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zd": { + "register": "Zreg1.Q" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t zn" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOV", + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t zn" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_u16]_m", + "arguments": [ + "svuint16_t zd", + "svbool_t pg", + "svuint16_t zn" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zd": { + "register": "Zreg1.Q" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t zn" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOV", + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t zn" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_u32]_m", + "arguments": [ + "svuint32_t zd", + "svbool_t pg", + "svuint32_t zn" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zd": { + "register": "Zreg1.Q" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t zn" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOV", + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t zn" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_u64]_m", + "arguments": [ + "svuint64_t zd", + "svbool_t pg", + "svuint64_t zn" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zd": { + "register": "Zreg1.Q" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t zn" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t zn" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOV", + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_u8]_m", + "arguments": [ + "svuint8_t zd", + "svbool_t pg", + "svuint8_t zn" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zd": { + "register": "Zreg1.Q" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t zn" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOV", + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrevd[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t zn" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "pg": { + "register": "Preg1" + }, + "zn": { + "register": "Zreg2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevh[_s32]_m", + "arguments": [ + "svint32_t inactive", + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVH" + ], + [ + "MOVPRFX", + "REVH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevh[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVH" + ], + [ + "MOVPRFX", + "REVH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevh[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "REVH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevh[_s64]_m", + "arguments": [ + "svint64_t inactive", + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVH" + ], + [ + "MOVPRFX", + "REVH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevh[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVH" + ], + [ + "MOVPRFX", + "REVH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevh[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "REVH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevh[_u32]_m", + "arguments": [ + "svuint32_t inactive", + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVH" + ], + [ + "MOVPRFX", + "REVH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevh[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVH" + ], + [ + "MOVPRFX", + "REVH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevh[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "REVH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevh[_u64]_m", + "arguments": [ + "svuint64_t inactive", + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVH" + ], + [ + "MOVPRFX", + "REVH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevh[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVH" + ], + [ + "MOVPRFX", + "REVH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevh[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "REVH" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevw[_s64]_m", + "arguments": [ + "svint64_t inactive", + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVW" + ], + [ + "MOVPRFX", + "REVW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevw[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVW" + ], + [ + "MOVPRFX", + "REVW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevw[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "REVW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevw[_u64]_m", + "arguments": [ + "svuint64_t inactive", + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVW" + ], + [ + "MOVPRFX", + "REVW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevw[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "REVW" + ], + [ + "MOVPRFX", + "REVW" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrevw[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "REVW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRHADD" + ], + [ + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRHADD" + ], + [ + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRHADD" + ], + [ + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRHADD" + ], + [ + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URHADD" + ], + [ + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URHADD" + ], + [ + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URHADD" + ], + [ + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URHADD" + ], + [ + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRHADD" + ], + [ + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRHADD" + ], + [ + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRHADD" + ], + [ + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRHADD" + ], + [ + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SRHADD" + ], + [ + "MOVPRFX", + "SRHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URHADD" + ], + [ + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URHADD" + ], + [ + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URHADD" + ], + [ + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URHADD" + ], + [ + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrhadd[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "URHADD" + ], + [ + "MOVPRFX", + "URHADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrinta[_f16]_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTA" + ], + [ + "MOVPRFX", + "FRINTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrinta[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTA" + ], + [ + "MOVPRFX", + "FRINTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrinta[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrinta[_f32]_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTA" + ], + [ + "MOVPRFX", + "FRINTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrinta[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTA" + ], + [ + "MOVPRFX", + "FRINTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrinta[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrinta[_f64]_m", + "arguments": [ + "svfloat64_t inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTA" + ], + [ + "MOVPRFX", + "FRINTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrinta[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTA" + ], + [ + "MOVPRFX", + "FRINTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrinta[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrinti[_f16]_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTI" + ], + [ + "MOVPRFX", + "FRINTI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrinti[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTI" + ], + [ + "MOVPRFX", + "FRINTI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrinti[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrinti[_f32]_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTI" + ], + [ + "MOVPRFX", + "FRINTI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrinti[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTI" + ], + [ + "MOVPRFX", + "FRINTI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrinti[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrinti[_f64]_m", + "arguments": [ + "svfloat64_t inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTI" + ], + [ + "MOVPRFX", + "FRINTI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrinti[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTI" + ], + [ + "MOVPRFX", + "FRINTI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrinti[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintm[_f16]_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTM" + ], + [ + "MOVPRFX", + "FRINTM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintm[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTM" + ], + [ + "MOVPRFX", + "FRINTM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintm[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintm[_f32]_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTM" + ], + [ + "MOVPRFX", + "FRINTM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintm[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTM" + ], + [ + "MOVPRFX", + "FRINTM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintm[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintm[_f64]_m", + "arguments": [ + "svfloat64_t inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTM" + ], + [ + "MOVPRFX", + "FRINTM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintm[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTM" + ], + [ + "MOVPRFX", + "FRINTM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintm[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTM" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintn[_f16]_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTN" + ], + [ + "MOVPRFX", + "FRINTN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintn[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTN" + ], + [ + "MOVPRFX", + "FRINTN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintn[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintn[_f32]_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTN" + ], + [ + "MOVPRFX", + "FRINTN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintn[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTN" + ], + [ + "MOVPRFX", + "FRINTN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintn[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintn[_f64]_m", + "arguments": [ + "svfloat64_t inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTN" + ], + [ + "MOVPRFX", + "FRINTN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintn[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTN" + ], + [ + "MOVPRFX", + "FRINTN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintn[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTN" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintp[_f16]_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTP" + ], + [ + "MOVPRFX", + "FRINTP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintp[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTP" + ], + [ + "MOVPRFX", + "FRINTP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintp[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintp[_f32]_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTP" + ], + [ + "MOVPRFX", + "FRINTP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintp[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTP" + ], + [ + "MOVPRFX", + "FRINTP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintp[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintp[_f64]_m", + "arguments": [ + "svfloat64_t inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTP" + ], + [ + "MOVPRFX", + "FRINTP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintp[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTP" + ], + [ + "MOVPRFX", + "FRINTP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintp[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTP" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintx[_f16]_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTX" + ], + [ + "MOVPRFX", + "FRINTX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintx[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTX" + ], + [ + "MOVPRFX", + "FRINTX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintx[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintx[_f32]_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTX" + ], + [ + "MOVPRFX", + "FRINTX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintx[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTX" + ], + [ + "MOVPRFX", + "FRINTX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintx[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintx[_f64]_m", + "arguments": [ + "svfloat64_t inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTX" + ], + [ + "MOVPRFX", + "FRINTX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintx[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTX" + ], + [ + "MOVPRFX", + "FRINTX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintx[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintz[_f16]_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTZ" + ], + [ + "MOVPRFX", + "FRINTZ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintz[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTZ" + ], + [ + "MOVPRFX", + "FRINTZ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintz[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTZ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintz[_f32]_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTZ" + ], + [ + "MOVPRFX", + "FRINTZ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintz[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTZ" + ], + [ + "MOVPRFX", + "FRINTZ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintz[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTZ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintz[_f64]_m", + "arguments": [ + "svfloat64_t inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTZ" + ], + [ + "MOVPRFX", + "FRINTZ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintz[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRINTZ" + ], + [ + "MOVPRFX", + "FRINTZ" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrintz[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FRINTZ" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "SRSHR" + ], + [ + "SRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "SRSHR" + ], + [ + "SRSHL" + ], + [ + "SRSHLR" + ], + [ + "MOVPRFX", + "SRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "SRSHR" + ], + [ + "MOVPRFX", + "SRSHL" + ], + [ + "MOVPRFX", + "SRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "SRSHR" + ], + [ + "SRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "SRSHR" + ], + [ + "SRSHL" + ], + [ + "SRSHLR" + ], + [ + "MOVPRFX", + "SRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "SRSHR" + ], + [ + "MOVPRFX", + "SRSHL" + ], + [ + "MOVPRFX", + "SRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "SRSHR" + ], + [ + "SRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "SRSHR" + ], + [ + "SRSHL" + ], + [ + "SRSHLR" + ], + [ + "MOVPRFX", + "SRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "SRSHR" + ], + [ + "MOVPRFX", + "SRSHL" + ], + [ + "MOVPRFX", + "SRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "SRSHR" + ], + [ + "SRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "SRSHR" + ], + [ + "SRSHL" + ], + [ + "SRSHLR" + ], + [ + "MOVPRFX", + "SRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "SRSHR" + ], + [ + "MOVPRFX", + "SRSHL" + ], + [ + "MOVPRFX", + "SRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "URSHR" + ], + [ + "URSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "URSHR" + ], + [ + "URSHL" + ], + [ + "URSHLR" + ], + [ + "MOVPRFX", + "URSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "URSHR" + ], + [ + "MOVPRFX", + "URSHL" + ], + [ + "MOVPRFX", + "URSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "URSHR" + ], + [ + "URSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "URSHR" + ], + [ + "URSHL" + ], + [ + "URSHLR" + ], + [ + "MOVPRFX", + "URSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "URSHR" + ], + [ + "MOVPRFX", + "URSHL" + ], + [ + "MOVPRFX", + "URSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "URSHR" + ], + [ + "URSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "URSHR" + ], + [ + "URSHL" + ], + [ + "URSHLR" + ], + [ + "MOVPRFX", + "URSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "URSHR" + ], + [ + "MOVPRFX", + "URSHL" + ], + [ + "MOVPRFX", + "URSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "URSHR" + ], + [ + "URSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LSL" + ], + [ + "URSHR" + ], + [ + "URSHL" + ], + [ + "URSHLR" + ], + [ + "MOVPRFX", + "URSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "LSL" + ], + [ + "MOVPRFX", + "URSHR" + ], + [ + "MOVPRFX", + "URSHL" + ], + [ + "MOVPRFX", + "URSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSHL" + ], + [ + "MOVPRFX", + "SRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSHL" + ], + [ + "SRSHLR" + ], + [ + "MOVPRFX", + "SRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SRSHL" + ], + [ + "MOVPRFX", + "SRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSHL" + ], + [ + "MOVPRFX", + "SRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSHL" + ], + [ + "SRSHLR" + ], + [ + "MOVPRFX", + "SRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SRSHL" + ], + [ + "MOVPRFX", + "SRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSHL" + ], + [ + "MOVPRFX", + "SRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSHL" + ], + [ + "SRSHLR" + ], + [ + "MOVPRFX", + "SRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SRSHL" + ], + [ + "MOVPRFX", + "SRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSHL" + ], + [ + "MOVPRFX", + "SRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSHL" + ], + [ + "SRSHLR" + ], + [ + "MOVPRFX", + "SRSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SRSHL" + ], + [ + "MOVPRFX", + "SRSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSHL" + ], + [ + "MOVPRFX", + "URSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSHL" + ], + [ + "URSHLR" + ], + [ + "MOVPRFX", + "URSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "URSHL" + ], + [ + "MOVPRFX", + "URSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSHL" + ], + [ + "MOVPRFX", + "URSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSHL" + ], + [ + "URSHLR" + ], + [ + "MOVPRFX", + "URSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "URSHL" + ], + [ + "MOVPRFX", + "URSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSHL" + ], + [ + "MOVPRFX", + "URSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSHL" + ], + [ + "URSHLR" + ], + [ + "MOVPRFX", + "URSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "URSHL" + ], + [ + "MOVPRFX", + "URSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSHL" + ], + [ + "MOVPRFX", + "URSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSHL" + ], + [ + "URSHLR" + ], + [ + "MOVPRFX", + "URSHL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshl[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "URSHL" + ], + [ + "MOVPRFX", + "URSHLR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSHR" + ], + [ + "MOVPRFX", + "SRSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSHR" + ], + [ + "MOVPRFX", + "SRSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SRSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSHR" + ], + [ + "MOVPRFX", + "SRSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSHR" + ], + [ + "MOVPRFX", + "SRSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SRSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 64 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSHR" + ], + [ + "MOVPRFX", + "SRSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 64 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSHR" + ], + [ + "MOVPRFX", + "SRSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 64 + }, + "op1": { + "register": "Zop1.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SRSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSHR" + ], + [ + "MOVPRFX", + "SRSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSHR" + ], + [ + "MOVPRFX", + "SRSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SRSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSHR" + ], + [ + "MOVPRFX", + "URSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSHR" + ], + [ + "MOVPRFX", + "URSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "URSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSHR" + ], + [ + "MOVPRFX", + "URSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSHR" + ], + [ + "MOVPRFX", + "URSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "URSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 64 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSHR" + ], + [ + "MOVPRFX", + "URSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 64 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSHR" + ], + [ + "MOVPRFX", + "URSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 64 + }, + "op1": { + "register": "Zop1.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "URSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSHR" + ], + [ + "MOVPRFX", + "URSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSHR" + ], + [ + "MOVPRFX", + "URSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshr[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "URSHR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshrnb[_n_s16]", + "arguments": [ + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshrnb[_n_s32]", + "arguments": [ + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshrnb[_n_s64]", + "arguments": [ + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshrnb[_n_u16]", + "arguments": [ + "svuint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshrnb[_n_u32]", + "arguments": [ + "svuint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshrnb[_n_u64]", + "arguments": [ + "svuint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshrnt[_n_s16]", + "arguments": [ + "svint8_t even", + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshrnt[_n_s32]", + "arguments": [ + "svint16_t even", + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshrnt[_n_s64]", + "arguments": [ + "svint32_t even", + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshrnt[_n_u16]", + "arguments": [ + "svuint8_t even", + "svuint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshrnt[_n_u32]", + "arguments": [ + "svuint16_t even", + "svuint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrshrnt[_n_u64]", + "arguments": [ + "svuint32_t even", + "svuint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrsqrte[_f16]", + "arguments": [ + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRSQRTE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrsqrte[_f32]", + "arguments": [ + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRSQRTE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrsqrte[_f64]", + "arguments": [ + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRSQRTE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsqrte[_u32]_m", + "arguments": [ + "svuint32_t inactive", + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSQRTE" + ], + [ + "MOVPRFX", + "URSQRTE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsqrte[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSQRTE" + ], + [ + "MOVPRFX", + "URSQRTE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsqrte[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "URSQRTE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrsqrts[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRSQRTS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrsqrts[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRSQRTS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svrsqrts[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FRSQRTS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsra[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSRA" + ], + [ + "MOVPRFX", + "SRSRA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsra[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSRA" + ], + [ + "MOVPRFX", + "SRSRA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsra[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 64 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSRA" + ], + [ + "MOVPRFX", + "SRSRA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsra[_n_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRSRA" + ], + [ + "MOVPRFX", + "SRSRA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsra[_n_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSRA" + ], + [ + "MOVPRFX", + "URSRA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsra[_n_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSRA" + ], + [ + "MOVPRFX", + "URSRA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsra[_n_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 64 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSRA" + ], + [ + "MOVPRFX", + "URSRA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsra[_n_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "URSRA" + ], + [ + "MOVPRFX", + "URSRA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnb[_n_s16]", + "arguments": [ + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnb[_n_s32]", + "arguments": [ + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnb[_n_s64]", + "arguments": [ + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnb[_n_u16]", + "arguments": [ + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnb[_n_u32]", + "arguments": [ + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnb[_n_u64]", + "arguments": [ + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnb[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnb[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnb[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnb[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnb[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnb[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnt[_n_s16]", + "arguments": [ + "svint8_t even", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnt[_n_s32]", + "arguments": [ + "svint16_t even", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnt[_n_s64]", + "arguments": [ + "svint32_t even", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnt[_n_u16]", + "arguments": [ + "svuint8_t even", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnt[_n_u32]", + "arguments": [ + "svuint16_t even", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnt[_n_u64]", + "arguments": [ + "svuint32_t even", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnt[_s16]", + "arguments": [ + "svint8_t even", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnt[_s32]", + "arguments": [ + "svint16_t even", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnt[_s64]", + "arguments": [ + "svint32_t even", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnt[_u16]", + "arguments": [ + "svuint8_t even", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnt[_u32]", + "arguments": [ + "svuint16_t even", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svrsubhnt[_u64]", + "arguments": [ + "svuint32_t even", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RSUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsbclb[_n_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SBCLB" + ], + [ + "MOVPRFX", + "SBCLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsbclb[_n_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SBCLB" + ], + [ + "MOVPRFX", + "SBCLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsbclb[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SBCLB" + ], + [ + "MOVPRFX", + "SBCLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsbclb[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SBCLB" + ], + [ + "MOVPRFX", + "SBCLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsbclt[_n_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "uint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SBCLT" + ], + [ + "MOVPRFX", + "SBCLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsbclt[_n_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "uint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SBCLT" + ], + [ + "MOVPRFX", + "SBCLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsbclt[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "svuint32_t op3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "op3": { + "register": "Zop3.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SBCLT" + ], + [ + "MOVPRFX", + "SBCLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsbclt[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "svuint64_t op3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "op3": { + "register": "Zop3.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SBCLT" + ], + [ + "MOVPRFX", + "SBCLT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svscale[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSCALE" + ], + [ + "MOVPRFX", "FSCALE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vscaleq_f16", + "SIMD_ISA": "SVE", + "name": "svscale[_f16]_x", "arguments": [ - "float16x8_t a", - "int16x8_t b" + "svbool_t pg", + "svfloat16_t op1", + "svint16_t op2" ], "return_type": { - "value": "float16x8_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.8H" + "op1": { + "register": "Zop1.H|Ztied1.H" }, - "b": { - "register": "Vm.8H" + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -274,25 +165327,33 @@ "instructions": [ [ "FSCALE" + ], + [ + "MOVPRFX", + "FSCALE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vscale_f32", + "SIMD_ISA": "SVE", + "name": "svscale[_f16]_z", "arguments": [ - "float32x2_t a", - "int32x2_t b" + "svbool_t pg", + "svfloat16_t op1", + "svint16_t op2" ], "return_type": { - "value": "float32x2_t" + "value": "svfloat16_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2S" + "op1": { + "register": "Zop1.H" }, - "b": { - "register": "Vm.2S" + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -300,26 +165361,31 @@ ], "instructions": [ [ + "MOVPRFX", "FSCALE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vscaleq_f32", + "SIMD_ISA": "SVE", + "name": "svscale[_f32]_m", "arguments": [ - "float32x4_t a", - "int32x4_t b" + "svbool_t pg", + "svfloat32_t op1", + "svint32_t op2" ], "return_type": { - "value": "float32x4_t" + "value": "svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.4S" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vm.4S" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -328,25 +165394,33 @@ "instructions": [ [ "FSCALE" + ], + [ + "MOVPRFX", + "FSCALE" ] ] }, { - "SIMD_ISA": "Neon", - "name": "vscaleq_f64", + "SIMD_ISA": "SVE", + "name": "svscale[_f32]_x", "arguments": [ - "float64x2_t a", - "int64x2_t b" + "svbool_t pg", + "svfloat32_t op1", + "svint32_t op2" ], "return_type": { - "value": "float64x2_t" + "value": "svfloat32_t" }, "Arguments_Preparation": { - "a": { - "register": "Vn.2D" + "op1": { + "register": "Zop1.S|Ztied1.S" }, - "b": { - "register": "Vm.2D" + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -355,6 +165429,37649 @@ "instructions": [ [ "FSCALE" + ], + [ + "MOVPRFX", + "FSCALE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svscale[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSCALE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svscale[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSCALE" + ], + [ + "MOVPRFX", + "FSCALE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svscale[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSCALE" + ], + [ + "MOVPRFX", + "FSCALE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svscale[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSCALE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svscale[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSCALE" + ], + [ + "MOVPRFX", + "FSCALE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svscale[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSCALE" + ], + [ + "MOVPRFX", + "FSCALE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svscale[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSCALE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svscale[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSCALE" + ], + [ + "MOVPRFX", + "FSCALE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svscale[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSCALE" + ], + [ + "MOVPRFX", + "FSCALE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svscale[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSCALE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svscale[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSCALE" + ], + [ + "MOVPRFX", + "FSCALE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svscale[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSCALE" + ], + [ + "MOVPRFX", + "FSCALE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svscale[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSCALE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsel[_b]", + "arguments": [ + "svbool_t pg", + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.B" + }, + "op2": { + "register": "Pop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SEL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsel[_f16]", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SEL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsel[_f32]", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SEL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsel[_f64]", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SEL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsel[_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SEL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsel[_s32]", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SEL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsel[_s64]", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SEL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsel[_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SEL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsel[_u16]", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SEL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsel[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SEL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsel[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SEL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsel[_u8]", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SEL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svset2[_b]", + "arguments": [ + "svboolx2_t tuple", + "uint64_t imm_index", + "svbool_t x" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset2[_f16]", + "arguments": [ + "svfloat16x2_t tuple", + "uint64_t imm_index", + "svfloat16_t x" + ], + "return_type": { + "value": "svfloat16x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset2[_f32]", + "arguments": [ + "svfloat32x2_t tuple", + "uint64_t imm_index", + "svfloat32_t x" + ], + "return_type": { + "value": "svfloat32x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset2[_f64]", + "arguments": [ + "svfloat64x2_t tuple", + "uint64_t imm_index", + "svfloat64_t x" + ], + "return_type": { + "value": "svfloat64x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset2[_s16]", + "arguments": [ + "svint16x2_t tuple", + "uint64_t imm_index", + "svint16_t x" + ], + "return_type": { + "value": "svint16x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset2[_s32]", + "arguments": [ + "svint32x2_t tuple", + "uint64_t imm_index", + "svint32_t x" + ], + "return_type": { + "value": "svint32x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset2[_s64]", + "arguments": [ + "svint64x2_t tuple", + "uint64_t imm_index", + "svint64_t x" + ], + "return_type": { + "value": "svint64x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset2[_s8]", + "arguments": [ + "svint8x2_t tuple", + "uint64_t imm_index", + "svint8_t x" + ], + "return_type": { + "value": "svint8x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset2[_u16]", + "arguments": [ + "svuint16x2_t tuple", + "uint64_t imm_index", + "svuint16_t x" + ], + "return_type": { + "value": "svuint16x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset2[_u32]", + "arguments": [ + "svuint32x2_t tuple", + "uint64_t imm_index", + "svuint32_t x" + ], + "return_type": { + "value": "svuint32x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset2[_u64]", + "arguments": [ + "svuint64x2_t tuple", + "uint64_t imm_index", + "svuint64_t x" + ], + "return_type": { + "value": "svuint64x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset2[_u8]", + "arguments": [ + "svuint8x2_t tuple", + "uint64_t imm_index", + "svuint8_t x" + ], + "return_type": { + "value": "svuint8x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset3[_f16]", + "arguments": [ + "svfloat16x3_t tuple", + "uint64_t imm_index", + "svfloat16_t x" + ], + "return_type": { + "value": "svfloat16x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset3[_f32]", + "arguments": [ + "svfloat32x3_t tuple", + "uint64_t imm_index", + "svfloat32_t x" + ], + "return_type": { + "value": "svfloat32x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset3[_f64]", + "arguments": [ + "svfloat64x3_t tuple", + "uint64_t imm_index", + "svfloat64_t x" + ], + "return_type": { + "value": "svfloat64x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset3[_s16]", + "arguments": [ + "svint16x3_t tuple", + "uint64_t imm_index", + "svint16_t x" + ], + "return_type": { + "value": "svint16x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset3[_s32]", + "arguments": [ + "svint32x3_t tuple", + "uint64_t imm_index", + "svint32_t x" + ], + "return_type": { + "value": "svint32x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset3[_s64]", + "arguments": [ + "svint64x3_t tuple", + "uint64_t imm_index", + "svint64_t x" + ], + "return_type": { + "value": "svint64x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset3[_s8]", + "arguments": [ + "svint8x3_t tuple", + "uint64_t imm_index", + "svint8_t x" + ], + "return_type": { + "value": "svint8x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset3[_u16]", + "arguments": [ + "svuint16x3_t tuple", + "uint64_t imm_index", + "svuint16_t x" + ], + "return_type": { + "value": "svuint16x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset3[_u32]", + "arguments": [ + "svuint32x3_t tuple", + "uint64_t imm_index", + "svuint32_t x" + ], + "return_type": { + "value": "svuint32x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset3[_u64]", + "arguments": [ + "svuint64x3_t tuple", + "uint64_t imm_index", + "svuint64_t x" + ], + "return_type": { + "value": "svuint64x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset3[_u8]", + "arguments": [ + "svuint8x3_t tuple", + "uint64_t imm_index", + "svuint8_t x" + ], + "return_type": { + "value": "svuint8x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svset4[_b]", + "arguments": [ + "svboolx4_t tuple", + "uint64_t imm_index", + "svbool_t x" + ], + "return_type": { + "value": "svboolx4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset4[_f16]", + "arguments": [ + "svfloat16x4_t tuple", + "uint64_t imm_index", + "svfloat16_t x" + ], + "return_type": { + "value": "svfloat16x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset4[_f32]", + "arguments": [ + "svfloat32x4_t tuple", + "uint64_t imm_index", + "svfloat32_t x" + ], + "return_type": { + "value": "svfloat32x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset4[_f64]", + "arguments": [ + "svfloat64x4_t tuple", + "uint64_t imm_index", + "svfloat64_t x" + ], + "return_type": { + "value": "svfloat64x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset4[_s16]", + "arguments": [ + "svint16x4_t tuple", + "uint64_t imm_index", + "svint16_t x" + ], + "return_type": { + "value": "svint16x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset4[_s32]", + "arguments": [ + "svint32x4_t tuple", + "uint64_t imm_index", + "svint32_t x" + ], + "return_type": { + "value": "svint32x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset4[_s64]", + "arguments": [ + "svint64x4_t tuple", + "uint64_t imm_index", + "svint64_t x" + ], + "return_type": { + "value": "svint64x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset4[_s8]", + "arguments": [ + "svint8x4_t tuple", + "uint64_t imm_index", + "svint8_t x" + ], + "return_type": { + "value": "svint8x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset4[_u16]", + "arguments": [ + "svuint16x4_t tuple", + "uint64_t imm_index", + "svuint16_t x" + ], + "return_type": { + "value": "svuint16x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset4[_u32]", + "arguments": [ + "svuint32x4_t tuple", + "uint64_t imm_index", + "svuint32_t x" + ], + "return_type": { + "value": "svuint32x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset4[_u64]", + "arguments": [ + "svuint64x4_t tuple", + "uint64_t imm_index", + "svuint64_t x" + ], + "return_type": { + "value": "svuint64x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svset4[_u8]", + "arguments": [ + "svuint8x4_t tuple", + "uint64_t imm_index", + "svuint8_t x" + ], + "return_type": { + "value": "svuint8x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsetffr", + "arguments": [], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": {}, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SETFFR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshllb[_n_s16]", + "arguments": [ + "svint8_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSHLLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshllb[_n_s32]", + "arguments": [ + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 15 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSHLLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshllb[_n_s64]", + "arguments": [ + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 31 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSHLLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshllb[_n_u16]", + "arguments": [ + "svuint8_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USHLLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshllb[_n_u32]", + "arguments": [ + "svuint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 15 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USHLLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshllb[_n_u64]", + "arguments": [ + "svuint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 31 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USHLLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshllt[_n_s16]", + "arguments": [ + "svint8_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSHLLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshllt[_n_s32]", + "arguments": [ + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 15 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSHLLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshllt[_n_s64]", + "arguments": [ + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 31 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSHLLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshllt[_n_u16]", + "arguments": [ + "svuint8_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USHLLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshllt[_n_u32]", + "arguments": [ + "svuint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 15 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USHLLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshllt[_n_u64]", + "arguments": [ + "svuint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 0, + "maximum": 31 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USHLLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshrnb[_n_s16]", + "arguments": [ + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshrnb[_n_s32]", + "arguments": [ + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshrnb[_n_s64]", + "arguments": [ + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshrnb[_n_u16]", + "arguments": [ + "svuint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshrnb[_n_u32]", + "arguments": [ + "svuint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshrnb[_n_u64]", + "arguments": [ + "svuint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHRNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshrnt[_n_s16]", + "arguments": [ + "svint8_t even", + "svint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshrnt[_n_s32]", + "arguments": [ + "svint16_t even", + "svint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshrnt[_n_s64]", + "arguments": [ + "svint32_t even", + "svint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshrnt[_n_u16]", + "arguments": [ + "svuint8_t even", + "svuint16_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "imm2": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshrnt[_n_u32]", + "arguments": [ + "svuint16_t even", + "svuint32_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "imm2": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svshrnt[_n_u64]", + "arguments": [ + "svuint32_t even", + "svuint64_t op1", + "uint64_t imm2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "imm2": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SHRNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsli[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 15 + }, + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SLI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsli[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 31 + }, + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SLI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsli[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 63 + }, + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SLI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsli[_n_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SLI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsli[_n_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 15 + }, + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SLI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsli[_n_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 31 + }, + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SLI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsli[_n_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 63 + }, + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SLI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsli[_n_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SLI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsm4e[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SM4E" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsm4ekey[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SM4EKEY" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsplice[_f16]", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SPLICE" + ], + [ + "MOVPRFX", + "SPLICE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsplice[_f32]", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SPLICE" + ], + [ + "MOVPRFX", + "SPLICE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsplice[_f64]", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SPLICE" + ], + [ + "MOVPRFX", + "SPLICE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsplice[_s16]", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SPLICE" + ], + [ + "MOVPRFX", + "SPLICE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsplice[_s32]", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SPLICE" + ], + [ + "MOVPRFX", + "SPLICE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsplice[_s64]", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SPLICE" + ], + [ + "MOVPRFX", + "SPLICE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsplice[_s8]", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SPLICE" + ], + [ + "MOVPRFX", + "SPLICE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsplice[_u16]", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SPLICE" + ], + [ + "MOVPRFX", + "SPLICE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsplice[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SPLICE" + ], + [ + "MOVPRFX", + "SPLICE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsplice[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SPLICE" + ], + [ + "MOVPRFX", + "SPLICE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsplice[_u8]", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SPLICE" + ], + [ + "MOVPRFX", + "SPLICE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USQADD" + ], + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USQADD" + ], + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USQADD" + ], + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USQADD" + ], + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UQADD" + ], + [ + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USQADD" + ], + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USQADD" + ], + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USQADD" + ], + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USQADD" + ], + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USQADD" + ], + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USQADD" + ], + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USQADD" + ], + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USQADD" + ], + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsqadd[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "USQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsqrt[_f16]_m", + "arguments": [ + "svfloat16_t inactive", + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.H|Ztied.H" + }, + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSQRT" + ], + [ + "MOVPRFX", + "FSQRT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsqrt[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H|Ztied.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSQRT" + ], + [ + "MOVPRFX", + "FSQRT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsqrt[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSQRT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsqrt[_f32]_m", + "arguments": [ + "svfloat32_t inactive", + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.S|Ztied.S" + }, + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSQRT" + ], + [ + "MOVPRFX", + "FSQRT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsqrt[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S|Ztied.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSQRT" + ], + [ + "MOVPRFX", + "FSQRT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsqrt[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSQRT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsqrt[_f64]_m", + "arguments": [ + "svfloat64_t inactive", + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "inactive": { + "register": "Zinactive.D|Ztied.D" + }, + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSQRT" + ], + [ + "MOVPRFX", + "FSQRT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsqrt[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D|Ztied.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSQRT" + ], + [ + "MOVPRFX", + "FSQRT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsqrt[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSQRT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsra[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSRA" + ], + [ + "MOVPRFX", + "SSRA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsra[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSRA" + ], + [ + "MOVPRFX", + "SSRA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsra[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 64 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSRA" + ], + [ + "MOVPRFX", + "SSRA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsra[_n_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSRA" + ], + [ + "MOVPRFX", + "SSRA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsra[_n_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USRA" + ], + [ + "MOVPRFX", + "USRA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsra[_n_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USRA" + ], + [ + "MOVPRFX", + "USRA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsra[_n_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 64 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USRA" + ], + [ + "MOVPRFX", + "USRA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsra[_n_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USRA" + ], + [ + "MOVPRFX", + "USRA" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsri[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsri[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsri[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 64 + }, + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsri[_n_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsri[_n_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsri[_n_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsri[_n_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 64 + }, + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsri[_n_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SRI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1[_f16]", + "arguments": [ + "svbool_t pg", + "float16_t *base", + "svfloat16_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_f16_x2]", + "arguments": [ + "svcount_t png", + "float16_t * rn", + "svfloat16x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H, Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_f16_x4]", + "arguments": [ + "svcount_t png", + "float16_t * rn", + "svfloat16x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H - Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1[_f32]", + "arguments": [ + "svbool_t pg", + "float32_t *base", + "svfloat32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_f32_x2]", + "arguments": [ + "svcount_t png", + "float32_t * rn", + "svfloat32x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S, Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_f32_x4]", + "arguments": [ + "svcount_t png", + "float32_t * rn", + "svfloat32x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S - Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1[_f64]", + "arguments": [ + "svbool_t pg", + "float64_t *base", + "svfloat64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ], + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_f64_x2]", + "arguments": [ + "svcount_t png", + "float64_t * rn", + "svfloat64x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D, Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_f64_x4]", + "arguments": [ + "svcount_t png", + "float64_t * rn", + "svfloat64x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D - Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1[_s16]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svint16_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_s16_x2]", + "arguments": [ + "svcount_t png", + "int16_t * rn", + "svint16x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H, Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_s16_x4]", + "arguments": [ + "svcount_t png", + "int16_t * rn", + "svint16x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H - Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1[_s32]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_s32_x2]", + "arguments": [ + "svcount_t png", + "int32_t * rn", + "svint32x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S, Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_s32_x4]", + "arguments": [ + "svcount_t png", + "int32_t * rn", + "svint32x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S - Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1[_s64]", + "arguments": [ + "svbool_t pg", + "int64_t *base", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ], + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_s64_x2]", + "arguments": [ + "svcount_t png", + "int64_t * rn", + "svint64x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D, Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_s64_x4]", + "arguments": [ + "svcount_t png", + "int64_t * rn", + "svint64x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D - Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1[_s8]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "svint8_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_s8_x2]", + "arguments": [ + "svcount_t png", + "int8_t * rn", + "svint8x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.B, Zreg2.B }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_s8_x4]", + "arguments": [ + "svcount_t png", + "int8_t * rn", + "svint8x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.B - Zreg2.B }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1[_u16]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svuint16_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_u16_x2]", + "arguments": [ + "svcount_t png", + "uint16_t * rn", + "svuint16x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H, Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_u16_x4]", + "arguments": [ + "svcount_t png", + "uint16_t * rn", + "svuint16x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H - Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1[_u32]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_u32_x2]", + "arguments": [ + "svcount_t png", + "uint32_t * rn", + "svuint32x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S, Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_u32_x4]", + "arguments": [ + "svcount_t png", + "uint32_t * rn", + "svuint32x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S - Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1[_u64]", + "arguments": [ + "svbool_t pg", + "uint64_t *base", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ], + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_u64_x2]", + "arguments": [ + "svcount_t png", + "uint64_t * rn", + "svuint64x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D, Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_u64_x4]", + "arguments": [ + "svcount_t png", + "uint64_t * rn", + "svuint64x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D - Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1[_u8]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "svuint8_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_u8_x2]", + "arguments": [ + "svcount_t png", + "uint8_t * rn", + "svuint8x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.B, Zreg2.B }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1[_u8_x4]", + "arguments": [ + "svcount_t png", + "uint8_t * rn", + "svuint8x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.B - Zreg2.B }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter[_u32base]_index[_f32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index", + "svfloat32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter[_u32base]_index[_s32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter[_u32base]_index[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter[_u32base]_offset[_f32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset", + "svfloat32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter[_u32base]_offset[_s32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter[_u32base]_offset[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter[_u32base_f32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "svfloat32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter[_u32base_s32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter[_u32base_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter[_u64base]_index[_f64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index", + "svfloat64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "index * 8": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ], + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter[_u64base]_index[_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "index * 8": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ], + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter[_u64base]_index[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "index * 8": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ], + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter[_u64base]_offset[_f64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "svfloat64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ], + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter[_u64base]_offset[_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ], + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter[_u64base]_offset[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ], + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter[_u64base_f64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "svfloat64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter[_u64base_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter[_u64base_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[s32]index[_f32]", + "arguments": [ + "svbool_t pg", + "float32_t *base", + "svint32_t indices", + "svfloat32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[s32]index[_s32]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svint32_t indices", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[s32]index[_u32]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svint32_t indices", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[s32]offset[_f32]", + "arguments": [ + "svbool_t pg", + "float32_t *base", + "svint32_t offsets", + "svfloat32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[s32]offset[_s32]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svint32_t offsets", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[s32]offset[_u32]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svint32_t offsets", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[s64]index[_f64]", + "arguments": [ + "svbool_t pg", + "float64_t *base", + "svint64_t indices", + "svfloat64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[s64]index[_s64]", + "arguments": [ + "svbool_t pg", + "int64_t *base", + "svint64_t indices", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[s64]index[_u64]", + "arguments": [ + "svbool_t pg", + "uint64_t *base", + "svint64_t indices", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[s64]offset[_f64]", + "arguments": [ + "svbool_t pg", + "float64_t *base", + "svint64_t offsets", + "svfloat64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[s64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "int64_t *base", + "svint64_t offsets", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[s64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "uint64_t *base", + "svint64_t offsets", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[u32]index[_f32]", + "arguments": [ + "svbool_t pg", + "float32_t *base", + "svuint32_t indices", + "svfloat32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[u32]index[_s32]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svuint32_t indices", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[u32]index[_u32]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svuint32_t indices", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[u32]offset[_f32]", + "arguments": [ + "svbool_t pg", + "float32_t *base", + "svuint32_t offsets", + "svfloat32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[u32]offset[_s32]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svuint32_t offsets", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[u32]offset[_u32]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svuint32_t offsets", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[u64]index[_f64]", + "arguments": [ + "svbool_t pg", + "float64_t *base", + "svuint64_t indices", + "svfloat64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[u64]index[_s64]", + "arguments": [ + "svbool_t pg", + "int64_t *base", + "svuint64_t indices", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[u64]index[_u64]", + "arguments": [ + "svbool_t pg", + "uint64_t *base", + "svuint64_t indices", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[u64]offset[_f64]", + "arguments": [ + "svbool_t pg", + "float64_t *base", + "svuint64_t offsets", + "svfloat64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[u64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "int64_t *base", + "svuint64_t offsets", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_scatter_[u64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "uint64_t *base", + "svuint64_t offsets", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_vnum[_f16]", + "arguments": [ + "svbool_t pg", + "float16_t *base", + "int64_t vnum", + "svfloat16_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.H" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_f16_x2]", + "arguments": [ + "svcount_t png", + "float16_t * rn", + "int64_t vnum", + "svfloat16x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H, Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_f16_x4]", + "arguments": [ + "svcount_t png", + "float16_t * rn", + "int64_t vnum", + "svfloat16x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H - Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_vnum[_f32]", + "arguments": [ + "svbool_t pg", + "float32_t *base", + "int64_t vnum", + "svfloat32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_f32_x2]", + "arguments": [ + "svcount_t png", + "float32_t * rn", + "int64_t vnum", + "svfloat32x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S, Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_f32_x4]", + "arguments": [ + "svcount_t png", + "float32_t * rn", + "int64_t vnum", + "svfloat32x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S - Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_vnum[_f64]", + "arguments": [ + "svbool_t pg", + "float64_t *base", + "int64_t vnum", + "svfloat64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ], + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_f64_x2]", + "arguments": [ + "svcount_t png", + "float64_t * rn", + "int64_t vnum", + "svfloat64x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D, Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_f64_x4]", + "arguments": [ + "svcount_t png", + "float64_t * rn", + "int64_t vnum", + "svfloat64x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D - Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_vnum[_s16]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "int64_t vnum", + "svint16_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.H" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_s16_x2]", + "arguments": [ + "svcount_t png", + "int16_t * rn", + "int64_t vnum", + "svint16x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H, Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_s16_x4]", + "arguments": [ + "svcount_t png", + "int16_t * rn", + "int64_t vnum", + "svint16x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H - Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_vnum[_s32]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "int64_t vnum", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_s32_x2]", + "arguments": [ + "svcount_t png", + "int32_t * rn", + "int64_t vnum", + "svint32x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S, Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_s32_x4]", + "arguments": [ + "svcount_t png", + "int32_t * rn", + "int64_t vnum", + "svint32x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S - Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_vnum[_s64]", + "arguments": [ + "svbool_t pg", + "int64_t *base", + "int64_t vnum", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ], + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_s64_x2]", + "arguments": [ + "svcount_t png", + "int64_t * rn", + "int64_t vnum", + "svint64x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D, Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_s64_x4]", + "arguments": [ + "svcount_t png", + "int64_t * rn", + "int64_t vnum", + "svint64x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D - Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_vnum[_s8]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "int64_t vnum", + "svint8_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.B" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_s8_x2]", + "arguments": [ + "svcount_t png", + "int8_t * rn", + "int64_t vnum", + "svint8x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.B, Zreg2.B }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_s8_x4]", + "arguments": [ + "svcount_t png", + "int8_t * rn", + "int64_t vnum", + "svint8x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.B - Zreg2.B }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_vnum[_u16]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "int64_t vnum", + "svuint16_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.H" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_u16_x2]", + "arguments": [ + "svcount_t png", + "uint16_t * rn", + "int64_t vnum", + "svuint16x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H, Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_u16_x4]", + "arguments": [ + "svcount_t png", + "uint16_t * rn", + "int64_t vnum", + "svuint16x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H - Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_vnum[_u32]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "int64_t vnum", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_u32_x2]", + "arguments": [ + "svcount_t png", + "uint32_t * rn", + "int64_t vnum", + "svuint32x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S, Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_u32_x4]", + "arguments": [ + "svcount_t png", + "uint32_t * rn", + "int64_t vnum", + "svuint32x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S - Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_vnum[_u64]", + "arguments": [ + "svbool_t pg", + "uint64_t *base", + "int64_t vnum", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1D" + ], + [ + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_u64_x2]", + "arguments": [ + "svcount_t png", + "uint64_t * rn", + "int64_t vnum", + "svuint64x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D, Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_u64_x4]", + "arguments": [ + "svcount_t png", + "uint64_t * rn", + "int64_t vnum", + "svuint64x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D - Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1_vnum[_u8]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "int64_t vnum", + "svuint8_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.B" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_u8_x2]", + "arguments": [ + "svcount_t png", + "uint8_t * rn", + "int64_t vnum", + "svuint8x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.B, Zreg2.B }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svst1_vnum[_u8_x4]", + "arguments": [ + "svcount_t png", + "uint8_t * rn", + "int64_t vnum", + "svuint8x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.B - Zreg2.B }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b[_s16]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "svint16_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b[_s32]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b[_s64]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b[_u16]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "svuint16_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b[_u32]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b[_u64]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_scatter[_u32base]_offset[_s32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_scatter[_u32base]_offset[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_scatter[_u32base_s32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_scatter[_u32base_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_scatter[_u64base]_offset[_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_scatter[_u64base]_offset[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_scatter[_u64base_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_scatter[_u64base_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_scatter_[s32]offset[_s32]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "svint32_t offsets", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_scatter_[s32]offset[_u32]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "svint32_t offsets", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_scatter_[s64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "svint64_t offsets", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_scatter_[s64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "svint64_t offsets", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_scatter_[u32]offset[_s32]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "svuint32_t offsets", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_scatter_[u32]offset[_u32]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "svuint32_t offsets", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_scatter_[u64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "svuint64_t offsets", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_scatter_[u64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "svuint64_t offsets", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_vnum[_s16]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "int64_t vnum", + "svint16_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.H" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_vnum[_s32]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "int64_t vnum", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_vnum[_s64]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "int64_t vnum", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_vnum[_u16]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "int64_t vnum", + "svuint16_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.H" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_vnum[_u32]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "int64_t vnum", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1b_vnum[_u64]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "int64_t vnum", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1B" + ], + [ + "ST1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h[_s32]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h[_s64]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h[_u32]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h[_u64]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter[_u32base]_index[_s32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter[_u32base]_index[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter[_u32base]_offset[_s32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter[_u32base]_offset[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter[_u32base_s32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter[_u32base_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter[_u64base]_index[_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter[_u64base]_index[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter[_u64base]_offset[_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter[_u64base]_offset[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter[_u64base_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter[_u64base_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter_[s32]index[_s32]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svint32_t indices", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter_[s32]index[_u32]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svint32_t indices", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter_[s32]offset[_s32]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svint32_t offsets", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter_[s32]offset[_u32]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svint32_t offsets", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter_[s64]index[_s64]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svint64_t indices", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter_[s64]index[_u64]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svint64_t indices", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter_[s64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svint64_t offsets", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter_[s64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svint64_t offsets", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter_[u32]index[_s32]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svuint32_t indices", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter_[u32]index[_u32]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svuint32_t indices", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "indices": { + "register": "Zindices.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter_[u32]offset[_s32]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svuint32_t offsets", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter_[u32]offset[_u32]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svuint32_t offsets", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter_[u64]index[_s64]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svuint64_t indices", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter_[u64]index[_u64]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svuint64_t indices", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter_[u64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svuint64_t offsets", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_scatter_[u64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svuint64_t offsets", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_vnum[_s32]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "int64_t vnum", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_vnum[_s64]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "int64_t vnum", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_vnum[_u32]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "int64_t vnum", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1h_vnum[_u64]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "int64_t vnum", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1H" + ], + [ + "ST1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1w[_s64]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1w[_u64]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1w_scatter[_u64base]_index[_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1w_scatter[_u64base]_index[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1w_scatter[_u64base]_offset[_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1w_scatter[_u64base]_offset[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1w_scatter[_u64base_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1w_scatter[_u64base_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1w_scatter_[s64]index[_s64]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svint64_t indices", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1w_scatter_[s64]index[_u64]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svint64_t indices", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1w_scatter_[s64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svint64_t offsets", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1w_scatter_[s64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svint64_t offsets", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1w_scatter_[u64]index[_s64]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svuint64_t indices", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1w_scatter_[u64]index[_u64]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svuint64_t indices", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices": { + "register": "Zindices.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1w_scatter_[u64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svuint64_t offsets", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1w_scatter_[u64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svuint64_t offsets", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1w_vnum[_s64]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "int64_t vnum", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst1w_vnum[_u64]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "int64_t vnum", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1W" + ], + [ + "ST1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2[_f16]", + "arguments": [ + "svbool_t pg", + "float16_t *base", + "svfloat16x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.H, Zdata1.H}" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2H" + ], + [ + "ST2H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2[_f32]", + "arguments": [ + "svbool_t pg", + "float32_t *base", + "svfloat32x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.S, Zdata1.S}" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2W" + ], + [ + "ST2W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2[_f64]", + "arguments": [ + "svbool_t pg", + "float64_t *base", + "svfloat64x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.D, Zdata1.D}" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2D" + ], + [ + "ST2D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2[_s16]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svint16x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.H, Zdata1.H}" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2H" + ], + [ + "ST2H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2[_s32]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svint32x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.S, Zdata1.S}" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2W" + ], + [ + "ST2W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2[_s64]", + "arguments": [ + "svbool_t pg", + "int64_t *base", + "svint64x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.D, Zdata1.D}" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2D" + ], + [ + "ST2D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2[_s8]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "svint8x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.B, Zdata1.B}" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2B" + ], + [ + "ST2B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2[_u16]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svuint16x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.H, Zdata1.H}" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2H" + ], + [ + "ST2H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2[_u32]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svuint32x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.S, Zdata1.S}" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2W" + ], + [ + "ST2W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2[_u64]", + "arguments": [ + "svbool_t pg", + "uint64_t *base", + "svuint64x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.D, Zdata1.D}" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2D" + ], + [ + "ST2D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2[_u8]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "svuint8x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.B, Zdata1.B}" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2B" + ], + [ + "ST2B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2_vnum[_f16]", + "arguments": [ + "svbool_t pg", + "float16_t *base", + "int64_t vnum", + "svfloat16x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.H, Zdata1.H}" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2H" + ], + [ + "ST2H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2_vnum[_f32]", + "arguments": [ + "svbool_t pg", + "float32_t *base", + "int64_t vnum", + "svfloat32x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.S, Zdata1.S}" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2W" + ], + [ + "ST2W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2_vnum[_f64]", + "arguments": [ + "svbool_t pg", + "float64_t *base", + "int64_t vnum", + "svfloat64x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.D, Zdata1.D}" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2D" + ], + [ + "ST2D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2_vnum[_s16]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "int64_t vnum", + "svint16x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.H, Zdata1.H}" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2H" + ], + [ + "ST2H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2_vnum[_s32]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "int64_t vnum", + "svint32x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.S, Zdata1.S}" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2W" + ], + [ + "ST2W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2_vnum[_s64]", + "arguments": [ + "svbool_t pg", + "int64_t *base", + "int64_t vnum", + "svint64x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.D, Zdata1.D}" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2D" + ], + [ + "ST2D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2_vnum[_s8]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "int64_t vnum", + "svint8x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.B, Zdata1.B}" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2B" + ], + [ + "ST2B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2_vnum[_u16]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "int64_t vnum", + "svuint16x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.H, Zdata1.H}" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2H" + ], + [ + "ST2H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2_vnum[_u32]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "int64_t vnum", + "svuint32x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.S, Zdata1.S}" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2W" + ], + [ + "ST2W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2_vnum[_u64]", + "arguments": [ + "svbool_t pg", + "uint64_t *base", + "int64_t vnum", + "svuint64x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.D, Zdata1.D}" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2D" + ], + [ + "ST2D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst2_vnum[_u8]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "int64_t vnum", + "svuint8x2_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.B, Zdata1.B}" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST2B" + ], + [ + "ST2B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3[_f16]", + "arguments": [ + "svbool_t pg", + "float16_t *base", + "svfloat16x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.H - Zdata2.H}" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3H" + ], + [ + "ST3H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3[_f32]", + "arguments": [ + "svbool_t pg", + "float32_t *base", + "svfloat32x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.S - Zdata2.S}" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3W" + ], + [ + "ST3W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3[_f64]", + "arguments": [ + "svbool_t pg", + "float64_t *base", + "svfloat64x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.D - Zdata2.D}" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3D" + ], + [ + "ST3D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3[_s16]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svint16x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.H - Zdata2.H}" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3H" + ], + [ + "ST3H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3[_s32]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svint32x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.S - Zdata2.S}" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3W" + ], + [ + "ST3W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3[_s64]", + "arguments": [ + "svbool_t pg", + "int64_t *base", + "svint64x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.D - Zdata2.D}" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3D" + ], + [ + "ST3D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3[_s8]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "svint8x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.B - Zdata2.B}" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3B" + ], + [ + "ST3B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3[_u16]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svuint16x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.H - Zdata2.H}" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3H" + ], + [ + "ST3H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3[_u32]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svuint32x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.S - Zdata2.S}" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3W" + ], + [ + "ST3W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3[_u64]", + "arguments": [ + "svbool_t pg", + "uint64_t *base", + "svuint64x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.D - Zdata2.D}" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3D" + ], + [ + "ST3D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3[_u8]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "svuint8x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.B - Zdata2.B}" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3B" + ], + [ + "ST3B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3_vnum[_f16]", + "arguments": [ + "svbool_t pg", + "float16_t *base", + "int64_t vnum", + "svfloat16x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.H - Zdata2.H}" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3H" + ], + [ + "ST3H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3_vnum[_f32]", + "arguments": [ + "svbool_t pg", + "float32_t *base", + "int64_t vnum", + "svfloat32x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.S - Zdata2.S}" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3W" + ], + [ + "ST3W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3_vnum[_f64]", + "arguments": [ + "svbool_t pg", + "float64_t *base", + "int64_t vnum", + "svfloat64x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.D - Zdata2.D}" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3D" + ], + [ + "ST3D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3_vnum[_s16]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "int64_t vnum", + "svint16x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.H - Zdata2.H}" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3H" + ], + [ + "ST3H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3_vnum[_s32]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "int64_t vnum", + "svint32x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.S - Zdata2.S}" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3W" + ], + [ + "ST3W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3_vnum[_s64]", + "arguments": [ + "svbool_t pg", + "int64_t *base", + "int64_t vnum", + "svint64x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.D - Zdata2.D}" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3D" + ], + [ + "ST3D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3_vnum[_s8]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "int64_t vnum", + "svint8x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.B - Zdata2.B}" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3B" + ], + [ + "ST3B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3_vnum[_u16]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "int64_t vnum", + "svuint16x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.H - Zdata2.H}" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3H" + ], + [ + "ST3H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3_vnum[_u32]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "int64_t vnum", + "svuint32x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.S - Zdata2.S}" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3W" + ], + [ + "ST3W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3_vnum[_u64]", + "arguments": [ + "svbool_t pg", + "uint64_t *base", + "int64_t vnum", + "svuint64x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.D - Zdata2.D}" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3D" + ], + [ + "ST3D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst3_vnum[_u8]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "int64_t vnum", + "svuint8x3_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.B - Zdata2.B}" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST3B" + ], + [ + "ST3B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4[_f16]", + "arguments": [ + "svbool_t pg", + "float16_t *base", + "svfloat16x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.H - Zdata3.H}" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4H" + ], + [ + "ST4H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4[_f32]", + "arguments": [ + "svbool_t pg", + "float32_t *base", + "svfloat32x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.S - Zdata3.S}" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4W" + ], + [ + "ST4W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4[_f64]", + "arguments": [ + "svbool_t pg", + "float64_t *base", + "svfloat64x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.D - Zdata3.D}" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4D" + ], + [ + "ST4D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4[_s16]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svint16x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.H - Zdata3.H}" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4H" + ], + [ + "ST4H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4[_s32]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svint32x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.S - Zdata3.S}" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4W" + ], + [ + "ST4W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4[_s64]", + "arguments": [ + "svbool_t pg", + "int64_t *base", + "svint64x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.D - Zdata3.D}" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4D" + ], + [ + "ST4D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4[_s8]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "svint8x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.B - Zdata3.B}" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4B" + ], + [ + "ST4B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4[_u16]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svuint16x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.H - Zdata3.H}" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4H" + ], + [ + "ST4H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4[_u32]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svuint32x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.S - Zdata3.S}" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4W" + ], + [ + "ST4W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4[_u64]", + "arguments": [ + "svbool_t pg", + "uint64_t *base", + "svuint64x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.D - Zdata3.D}" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4D" + ], + [ + "ST4D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4[_u8]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "svuint8x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.B - Zdata3.B}" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4B" + ], + [ + "ST4B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4_vnum[_f16]", + "arguments": [ + "svbool_t pg", + "float16_t *base", + "int64_t vnum", + "svfloat16x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.H - Zdata3.H}" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4H" + ], + [ + "ST4H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4_vnum[_f32]", + "arguments": [ + "svbool_t pg", + "float32_t *base", + "int64_t vnum", + "svfloat32x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.S - Zdata3.S}" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4W" + ], + [ + "ST4W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4_vnum[_f64]", + "arguments": [ + "svbool_t pg", + "float64_t *base", + "int64_t vnum", + "svfloat64x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.D - Zdata3.D}" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4D" + ], + [ + "ST4D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4_vnum[_s16]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "int64_t vnum", + "svint16x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.H - Zdata3.H}" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4H" + ], + [ + "ST4H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4_vnum[_s32]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "int64_t vnum", + "svint32x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.S - Zdata3.S}" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4W" + ], + [ + "ST4W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4_vnum[_s64]", + "arguments": [ + "svbool_t pg", + "int64_t *base", + "int64_t vnum", + "svint64x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.D - Zdata3.D}" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4D" + ], + [ + "ST4D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4_vnum[_s8]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "int64_t vnum", + "svint8x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.B - Zdata3.B}" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4B" + ], + [ + "ST4B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4_vnum[_u16]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "int64_t vnum", + "svuint16x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.H - Zdata3.H}" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4H" + ], + [ + "ST4H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4_vnum[_u32]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "int64_t vnum", + "svuint32x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.S - Zdata3.S}" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4W" + ], + [ + "ST4W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4_vnum[_u64]", + "arguments": [ + "svbool_t pg", + "uint64_t *base", + "int64_t vnum", + "svuint64x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.D - Zdata3.D}" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4D" + ], + [ + "ST4D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svst4_vnum[_u8]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "int64_t vnum", + "svuint8x4_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "{Zdata0.B - Zdata3.B}" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST4B" + ], + [ + "ST4B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1[_f16]", + "arguments": [ + "svbool_t pg", + "float16_t *base", + "svfloat16_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ], + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_f16_x2]", + "arguments": [ + "svcount_t png", + "float16_t * rn", + "svfloat16x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H, Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_f16_x4]", + "arguments": [ + "svcount_t png", + "float16_t * rn", + "svfloat16x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H - Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1[_f32]", + "arguments": [ + "svbool_t pg", + "float32_t *base", + "svfloat32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ], + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_f32_x2]", + "arguments": [ + "svcount_t png", + "float32_t * rn", + "svfloat32x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S, Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_f32_x4]", + "arguments": [ + "svcount_t png", + "float32_t * rn", + "svfloat32x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S - Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1[_f64]", + "arguments": [ + "svbool_t pg", + "float64_t *base", + "svfloat64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ], + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_f64_x2]", + "arguments": [ + "svcount_t png", + "float64_t * rn", + "svfloat64x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D, Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_f64_x4]", + "arguments": [ + "svcount_t png", + "float64_t * rn", + "svfloat64x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D - Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1[_s16]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svint16_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ], + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_s16_x2]", + "arguments": [ + "svcount_t png", + "int16_t * rn", + "svint16x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H, Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_s16_x4]", + "arguments": [ + "svcount_t png", + "int16_t * rn", + "svint16x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H - Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1[_s32]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ], + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_s32_x2]", + "arguments": [ + "svcount_t png", + "int32_t * rn", + "svint32x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S, Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_s32_x4]", + "arguments": [ + "svcount_t png", + "int32_t * rn", + "svint32x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S - Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1[_s64]", + "arguments": [ + "svbool_t pg", + "int64_t *base", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ], + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_s64_x2]", + "arguments": [ + "svcount_t png", + "int64_t * rn", + "svint64x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D, Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_s64_x4]", + "arguments": [ + "svcount_t png", + "int64_t * rn", + "svint64x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D - Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1[_s8]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "svint8_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ], + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_s8_x2]", + "arguments": [ + "svcount_t png", + "int8_t * rn", + "svint8x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.B, Zreg2.B }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_s8_x4]", + "arguments": [ + "svcount_t png", + "int8_t * rn", + "svint8x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.B - Zreg2.B }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1[_u16]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svuint16_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ], + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_u16_x2]", + "arguments": [ + "svcount_t png", + "uint16_t * rn", + "svuint16x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H, Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_u16_x4]", + "arguments": [ + "svcount_t png", + "uint16_t * rn", + "svuint16x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H - Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1[_u32]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ], + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_u32_x2]", + "arguments": [ + "svcount_t png", + "uint32_t * rn", + "svuint32x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S, Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_u32_x4]", + "arguments": [ + "svcount_t png", + "uint32_t * rn", + "svuint32x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S - Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1[_u64]", + "arguments": [ + "svbool_t pg", + "uint64_t *base", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ], + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_u64_x2]", + "arguments": [ + "svcount_t png", + "uint64_t * rn", + "svuint64x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D, Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_u64_x4]", + "arguments": [ + "svcount_t png", + "uint64_t * rn", + "svuint64x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D - Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1[_u8]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "svuint8_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ], + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_u8_x2]", + "arguments": [ + "svcount_t png", + "uint8_t * rn", + "svuint8x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.B, Zreg2.B }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1[_u8_x4]", + "arguments": [ + "svcount_t png", + "uint8_t * rn", + "svuint8x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg1" + }, + "zt": { + "Z multi-vector": "{ Zreg1.B - Zreg2.B }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter[_u32base]_index[_f32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index", + "svfloat32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter[_u32base]_index[_s32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter[_u32base]_index[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter[_u32base]_offset[_f32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset", + "svfloat32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter[_u32base]_offset[_s32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter[_u32base]_offset[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter[_u32base_f32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "svfloat32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter[_u32base_s32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter[_u32base_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter[_u64base]_index[_f64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index", + "svfloat64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "index * 8": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter[_u64base]_index[_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "index * 8": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter[_u64base]_index[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "index * 8": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter[_u64base]_offset[_f64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "svfloat64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter[_u64base]_offset[_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter[_u64base]_offset[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter[_u64base_f64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "svfloat64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter[_u64base_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter[_u64base_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter_[s64]index[_f64]", + "arguments": [ + "svbool_t pg", + "float64_t *base", + "svint64_t indices", + "svfloat64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices * 8": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter_[s64]index[_s64]", + "arguments": [ + "svbool_t pg", + "int64_t *base", + "svint64_t indices", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices * 8": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter_[s64]index[_u64]", + "arguments": [ + "svbool_t pg", + "uint64_t *base", + "svint64_t indices", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices * 8": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter_[s64]offset[_f64]", + "arguments": [ + "svbool_t pg", + "float64_t *base", + "svint64_t offsets", + "svfloat64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter_[s64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "int64_t *base", + "svint64_t offsets", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter_[s64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "uint64_t *base", + "svint64_t offsets", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter_[u32]offset[_f32]", + "arguments": [ + "svbool_t pg", + "float32_t *base", + "svuint32_t offsets", + "svfloat32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter_[u32]offset[_s32]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svuint32_t offsets", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter_[u32]offset[_u32]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svuint32_t offsets", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter_[u64]index[_f64]", + "arguments": [ + "svbool_t pg", + "float64_t *base", + "svuint64_t indices", + "svfloat64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices * 8": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter_[u64]index[_s64]", + "arguments": [ + "svbool_t pg", + "int64_t *base", + "svuint64_t indices", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices * 8": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter_[u64]index[_u64]", + "arguments": [ + "svbool_t pg", + "uint64_t *base", + "svuint64_t indices", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices * 8": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter_[u64]offset[_f64]", + "arguments": [ + "svbool_t pg", + "float64_t *base", + "svuint64_t offsets", + "svfloat64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter_[u64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "int64_t *base", + "svuint64_t offsets", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_scatter_[u64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "uint64_t *base", + "svuint64_t offsets", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1_vnum[_f16]", + "arguments": [ + "svbool_t pg", + "float16_t *base", + "int64_t vnum", + "svfloat16_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.H" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ], + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_f16_x2]", + "arguments": [ + "svcount_t png", + "float16_t * rn", + "int64_t vnum", + "svfloat16x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H, Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_f16_x4]", + "arguments": [ + "svcount_t png", + "float16_t * rn", + "int64_t vnum", + "svfloat16x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H - Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1_vnum[_f32]", + "arguments": [ + "svbool_t pg", + "float32_t *base", + "int64_t vnum", + "svfloat32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ], + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_f32_x2]", + "arguments": [ + "svcount_t png", + "float32_t * rn", + "int64_t vnum", + "svfloat32x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S, Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_f32_x4]", + "arguments": [ + "svcount_t png", + "float32_t * rn", + "int64_t vnum", + "svfloat32x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S - Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1_vnum[_f64]", + "arguments": [ + "svbool_t pg", + "float64_t *base", + "int64_t vnum", + "svfloat64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ], + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_f64_x2]", + "arguments": [ + "svcount_t png", + "float64_t * rn", + "int64_t vnum", + "svfloat64x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D, Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_f64_x4]", + "arguments": [ + "svcount_t png", + "float64_t * rn", + "int64_t vnum", + "svfloat64x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D - Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1_vnum[_s16]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "int64_t vnum", + "svint16_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.H" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ], + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_s16_x2]", + "arguments": [ + "svcount_t png", + "int16_t * rn", + "int64_t vnum", + "svint16x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H, Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_s16_x4]", + "arguments": [ + "svcount_t png", + "int16_t * rn", + "int64_t vnum", + "svint16x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H - Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1_vnum[_s32]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "int64_t vnum", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ], + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_s32_x2]", + "arguments": [ + "svcount_t png", + "int32_t * rn", + "int64_t vnum", + "svint32x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S, Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_s32_x4]", + "arguments": [ + "svcount_t png", + "int32_t * rn", + "int64_t vnum", + "svint32x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S - Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1_vnum[_s64]", + "arguments": [ + "svbool_t pg", + "int64_t *base", + "int64_t vnum", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ], + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_s64_x2]", + "arguments": [ + "svcount_t png", + "int64_t * rn", + "int64_t vnum", + "svint64x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D, Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_s64_x4]", + "arguments": [ + "svcount_t png", + "int64_t * rn", + "int64_t vnum", + "svint64x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D - Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1_vnum[_s8]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "int64_t vnum", + "svint8_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.B" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ], + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_s8_x2]", + "arguments": [ + "svcount_t png", + "int8_t * rn", + "int64_t vnum", + "svint8x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.B, Zreg2.B }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_s8_x4]", + "arguments": [ + "svcount_t png", + "int8_t * rn", + "int64_t vnum", + "svint8x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.B - Zreg2.B }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1_vnum[_u16]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "int64_t vnum", + "svuint16_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.H" + }, + "pg": { + "register": "Pg.H" + }, + "vnum * svcnth()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ], + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_u16_x2]", + "arguments": [ + "svcount_t png", + "uint16_t * rn", + "int64_t vnum", + "svuint16x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H, Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_u16_x4]", + "arguments": [ + "svcount_t png", + "uint16_t * rn", + "int64_t vnum", + "svuint16x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.H - Zreg2.H }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1_vnum[_u32]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "int64_t vnum", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + }, + "vnum * svcntw()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ], + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_u32_x2]", + "arguments": [ + "svcount_t png", + "uint32_t * rn", + "int64_t vnum", + "svuint32x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S, Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_u32_x4]", + "arguments": [ + "svcount_t png", + "uint32_t * rn", + "int64_t vnum", + "svuint32x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.S - Zreg2.S }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1_vnum[_u64]", + "arguments": [ + "svbool_t pg", + "uint64_t *base", + "int64_t vnum", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + }, + "vnum * svcntd()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1D" + ], + [ + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_u64_x2]", + "arguments": [ + "svcount_t png", + "uint64_t * rn", + "int64_t vnum", + "svuint64x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D, Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_u64_x4]", + "arguments": [ + "svcount_t png", + "uint64_t * rn", + "int64_t vnum", + "svuint64x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.D - Zreg2.D }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1D" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svstnt1_vnum[_u8]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "int64_t vnum", + "svuint8_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.B" + }, + "pg": { + "register": "Pg.B" + }, + "vnum * svcntb()": { + "register": "Xindex" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ], + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_u8_x2]", + "arguments": [ + "svcount_t png", + "uint8_t * rn", + "int64_t vnum", + "svuint8x2_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.B, Zreg2.B }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1_vnum[_u8_x4]", + "arguments": [ + "svcount_t png", + "uint8_t * rn", + "int64_t vnum", + "svuint8x4_t zt" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "png": { + "register": "PNreg1" + }, + "rn": { + "register": "Xreg2" + }, + "vnum": { + "register": "Xreg3" + }, + "zt": { + "Z multi-vector": "{ Zreg1.B - Zreg2.B }" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "RDVL", + "MADD", + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1b_scatter[_u32base]_offset[_s32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1b_scatter[_u32base]_offset[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1b_scatter[_u32base_s32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1b_scatter[_u32base_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1b_scatter[_u64base]_offset[_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1b_scatter[_u64base]_offset[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1b_scatter[_u64base_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1b_scatter[_u64base_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1b_scatter_[s64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "svint64_t offsets", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1b_scatter_[s64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "svint64_t offsets", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1b_scatter_[u32]offset[_s32]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "svuint32_t offsets", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1b_scatter_[u32]offset[_u32]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "svuint32_t offsets", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1b_scatter_[u64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "int8_t *base", + "svuint64_t offsets", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1b_scatter_[u64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "uint8_t *base", + "svuint64_t offsets", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1B" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter[_u32base]_index[_s32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter[_u32base]_index[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t index", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter[_u32base]_offset[_s32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter[_u32base]_offset[_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "int64_t offset", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter[_u32base_s32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter[_u32base_u32]", + "arguments": [ + "svbool_t pg", + "svuint32_t bases", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.S" + }, + "data": { + "register": "Zdata.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter[_u64base]_index[_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter[_u64base]_index[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "index * 2": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter[_u64base]_offset[_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter[_u64base]_offset[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter[_u64base_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter[_u64base_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter_[s64]index[_s64]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svint64_t indices", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices * 2": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter_[s64]index[_u64]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svint64_t indices", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices * 2": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter_[s64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svint64_t offsets", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter_[s64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svint64_t offsets", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter_[u32]offset[_s32]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svuint32_t offsets", + "svint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter_[u32]offset[_u32]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svuint32_t offsets", + "svuint32_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.S" + }, + "offsets": { + "register": "Zoffsets.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter_[u64]index[_s64]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svuint64_t indices", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices * 2": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter_[u64]index[_u64]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svuint64_t indices", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices * 2": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter_[u64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "int16_t *base", + "svuint64_t offsets", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1h_scatter_[u64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "uint16_t *base", + "svuint64_t offsets", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1H" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1w_scatter[_u64base]_index[_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1w_scatter[_u64base]_index[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t index", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "index * 4": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1w_scatter[_u64base]_offset[_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1w_scatter[_u64base]_offset[_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "int64_t offset", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "offset": { + "register": "Xoffset" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1w_scatter[_u64base_s64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1w_scatter[_u64base_u64]", + "arguments": [ + "svbool_t pg", + "svuint64_t bases", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "bases": { + "register": "Zbases.D" + }, + "data": { + "register": "Zdata.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1w_scatter_[s64]index[_s64]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svint64_t indices", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices * 4": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1w_scatter_[s64]index[_u64]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svint64_t indices", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices * 4": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1w_scatter_[s64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svint64_t offsets", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1w_scatter_[s64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svint64_t offsets", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1w_scatter_[u64]index[_s64]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svuint64_t indices", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices * 4": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1w_scatter_[u64]index[_u64]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svuint64_t indices", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "indices * 4": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1w_scatter_[u64]offset[_s64]", + "arguments": [ + "svbool_t pg", + "int32_t *base", + "svuint64_t offsets", + "svint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svstnt1w_scatter_[u64]offset[_u64]", + "arguments": [ + "svbool_t pg", + "uint32_t *base", + "svuint64_t offsets", + "svuint64_t data" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "base": { + "register": "Xbase" + }, + "data": { + "register": "Zdata.D" + }, + "offsets": { + "register": "Zoffsets.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STNT1W" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUB" + ], + [ + "MOVPRFX", + "FSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUB" + ], + [ + "FSUBR" + ], + [ + "FSUB" + ], + [ + "MOVPRFX", + "FSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSUB" + ], + [ + "MOVPRFX", + "FSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUB" + ], + [ + "MOVPRFX", + "FSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUB" + ], + [ + "FSUBR" + ], + [ + "FSUB" + ], + [ + "MOVPRFX", + "FSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSUB" + ], + [ + "MOVPRFX", + "FSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUB" + ], + [ + "MOVPRFX", + "FSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUB" + ], + [ + "FSUBR" + ], + [ + "FSUB" + ], + [ + "MOVPRFX", + "FSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSUB" + ], + [ + "MOVPRFX", + "FSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUB" + ], + [ + "FADD" + ], + [ + "FSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUB" + ], + [ + "FADD" + ], + [ + "FSUB" + ], + [ + "FSUBR" + ], + [ + "FSUB" + ], + [ + "MOVPRFX", + "FSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSUB" + ], + [ + "MOVPRFX", + "FADD" + ], + [ + "MOVPRFX", + "FSUB" + ], + [ + "MOVPRFX", + "FSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUB" + ], + [ + "FADD" + ], + [ + "FSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUB" + ], + [ + "FADD" + ], + [ + "FSUB" + ], + [ + "FSUBR" + ], + [ + "FSUB" + ], + [ + "MOVPRFX", + "FSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSUB" + ], + [ + "MOVPRFX", + "FADD" + ], + [ + "MOVPRFX", + "FSUB" + ], + [ + "MOVPRFX", + "FSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUB" + ], + [ + "FADD" + ], + [ + "FSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUB" + ], + [ + "FADD" + ], + [ + "FSUB" + ], + [ + "FSUBR" + ], + [ + "FSUB" + ], + [ + "MOVPRFX", + "FSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSUB" + ], + [ + "MOVPRFX", + "FADD" + ], + [ + "MOVPRFX", + "FSUB" + ], + [ + "MOVPRFX", + "FSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "ADD" + ], + [ + "SUB" + ], + [ + "SUBR" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUB" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "ADD" + ], + [ + "SUB" + ], + [ + "SUBR" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUB" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "ADD" + ], + [ + "SUB" + ], + [ + "SUBR" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUB" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "ADD" + ], + [ + "SUB" + ], + [ + "SUBR" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUB" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "ADD" + ], + [ + "SUB" + ], + [ + "SUBR" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUB" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "ADD" + ], + [ + "SUB" + ], + [ + "SUBR" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUB" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "ADD" + ], + [ + "SUB" + ], + [ + "SUBR" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUB" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "ADD" + ], + [ + "SUB" + ], + [ + "SUBR" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUB" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "SUBR" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUB" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "SUBR" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUB" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "SUBR" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUB" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "SUBR" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUB" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "SUBR" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUB" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "SUBR" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUB" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "SUBR" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUB" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUB" + ], + [ + "SUBR" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsub[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUB" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnb[_n_s16]", + "arguments": [ + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnb[_n_s32]", + "arguments": [ + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnb[_n_s64]", + "arguments": [ + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnb[_n_u16]", + "arguments": [ + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnb[_n_u32]", + "arguments": [ + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnb[_n_u64]", + "arguments": [ + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnb[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnb[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnb[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnb[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnb[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnb[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnt[_n_s16]", + "arguments": [ + "svint8_t even", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnt[_n_s32]", + "arguments": [ + "svint16_t even", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnt[_n_s64]", + "arguments": [ + "svint32_t even", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnt[_n_u16]", + "arguments": [ + "svuint8_t even", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnt[_n_u32]", + "arguments": [ + "svuint16_t even", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnt[_n_u64]", + "arguments": [ + "svuint32_t even", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnt[_s16]", + "arguments": [ + "svint8_t even", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnt[_s32]", + "arguments": [ + "svint16_t even", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnt[_s64]", + "arguments": [ + "svint32_t even", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnt[_u16]", + "arguments": [ + "svuint8_t even", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.B" + }, + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnt[_u32]", + "arguments": [ + "svuint16_t even", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.H" + }, + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubhnt[_u64]", + "arguments": [ + "svuint32_t even", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "even": { + "register": "Ztied.S" + }, + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBHNT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublb[_n_s16]", + "arguments": [ + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublb[_n_s32]", + "arguments": [ + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublb[_n_s64]", + "arguments": [ + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublb[_n_u16]", + "arguments": [ + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublb[_n_u32]", + "arguments": [ + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublb[_n_u64]", + "arguments": [ + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublb[_s16]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublb[_s32]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublb[_s64]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublb[_u16]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublb[_u32]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublb[_u64]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBLB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublbt[_n_s16]", + "arguments": [ + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublbt[_n_s32]", + "arguments": [ + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublbt[_n_s64]", + "arguments": [ + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublbt[_s16]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublbt[_s32]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublbt[_s64]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLBT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublt[_n_s16]", + "arguments": [ + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublt[_n_s32]", + "arguments": [ + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublt[_n_s64]", + "arguments": [ + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublt[_n_u16]", + "arguments": [ + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublt[_n_u32]", + "arguments": [ + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublt[_n_u64]", + "arguments": [ + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublt[_s16]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublt[_s32]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublt[_s64]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublt[_u16]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublt[_u32]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsublt[_u64]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBLT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubltb[_n_s16]", + "arguments": [ + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubltb[_n_s32]", + "arguments": [ + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubltb[_n_s64]", + "arguments": [ + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubltb[_s16]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubltb[_s32]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLTB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubltb[_s64]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBLTB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUBR" + ], + [ + "MOVPRFX", + "FSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUBR" + ], + [ + "FSUB" + ], + [ + "FSUB" + ], + [ + "MOVPRFX", + "FSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSUBR" + ], + [ + "MOVPRFX", + "FSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUBR" + ], + [ + "MOVPRFX", + "FSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUBR" + ], + [ + "FSUB" + ], + [ + "FSUB" + ], + [ + "MOVPRFX", + "FSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSUBR" + ], + [ + "MOVPRFX", + "FSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUBR" + ], + [ + "MOVPRFX", + "FSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUBR" + ], + [ + "FSUB" + ], + [ + "FSUB" + ], + [ + "MOVPRFX", + "FSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSUBR" + ], + [ + "MOVPRFX", + "FSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_f16]_m", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUBR" + ], + [ + "FSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_f16]_x", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUBR" + ], + [ + "FSUBR" + ], + [ + "FSUB" + ], + [ + "FSUB" + ], + [ + "MOVPRFX", + "FSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_f16]_z", + "arguments": [ + "svbool_t pg", + "svfloat16_t op1", + "float16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSUBR" + ], + [ + "MOVPRFX", + "FSUBR" + ], + [ + "MOVPRFX", + "FSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_f32]_m", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUBR" + ], + [ + "FSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_f32]_x", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUBR" + ], + [ + "FSUBR" + ], + [ + "FSUB" + ], + [ + "FSUB" + ], + [ + "MOVPRFX", + "FSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_f32]_z", + "arguments": [ + "svbool_t pg", + "svfloat32_t op1", + "float32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSUBR" + ], + [ + "MOVPRFX", + "FSUBR" + ], + [ + "MOVPRFX", + "FSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_f64]_m", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUBR" + ], + [ + "FSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_f64]_x", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSUBR" + ], + [ + "FSUBR" + ], + [ + "FSUB" + ], + [ + "FSUB" + ], + [ + "MOVPRFX", + "FSUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_f64]_z", + "arguments": [ + "svbool_t pg", + "svfloat64_t op1", + "float64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "FSUBR" + ], + [ + "MOVPRFX", + "FSUBR" + ], + [ + "MOVPRFX", + "FSUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "SUBR" + ], + [ + "SUB" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUBR" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "SUBR" + ], + [ + "SUB" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUBR" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "SUBR" + ], + [ + "SUB" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUBR" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "SUBR" + ], + [ + "SUB" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUBR" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]|Ztied2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "SUBR" + ], + [ + "SUB" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUBR" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]|Ztied2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "SUBR" + ], + [ + "SUB" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUBR" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]|Ztied2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "SUBR" + ], + [ + "SUB" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUBR" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]|Ztied2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "SUBR" + ], + [ + "SUB" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_n_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUBR" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "SUB" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUBR" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "SUB" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUBR" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "SUB" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUBR" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "SUB" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUBR" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_u16]_m", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_u16]_x", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "SUB" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_u16]_z", + "arguments": [ + "svbool_t pg", + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUBR" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_u32]_m", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_u32]_x", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "SUB" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_u32]_z", + "arguments": [ + "svbool_t pg", + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUBR" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_u64]_m", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_u64]_x", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "SUB" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_u64]_z", + "arguments": [ + "svbool_t pg", + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUBR" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_u8]_m", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "MOVPRFX", + "SUBR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_u8]_x", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUBR" + ], + [ + "SUB" + ], + [ + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsubr[_u8]_z", + "arguments": [ + "svbool_t pg", + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUBR" + ], + [ + "MOVPRFX", + "SUB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwb[_n_s16]", + "arguments": [ + "svint16_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBWB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwb[_n_s32]", + "arguments": [ + "svint32_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBWB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwb[_n_s64]", + "arguments": [ + "svint64_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBWB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwb[_n_u16]", + "arguments": [ + "svuint16_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBWB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwb[_n_u32]", + "arguments": [ + "svuint32_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBWB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwb[_n_u64]", + "arguments": [ + "svuint64_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBWB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwb[_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBWB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwb[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBWB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwb[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBWB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwb[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBWB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwb[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBWB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwb[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBWB" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwt[_n_s16]", + "arguments": [ + "svint16_t op1", + "int8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBWT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwt[_n_s32]", + "arguments": [ + "svint32_t op1", + "int16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBWT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwt[_n_s64]", + "arguments": [ + "svint64_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBWT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwt[_n_u16]", + "arguments": [ + "svuint16_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBWT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwt[_n_u32]", + "arguments": [ + "svuint32_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.H[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBWT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwt[_n_u64]", + "arguments": [ + "svuint64_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.S[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBWT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwt[_s16]", + "arguments": [ + "svint16_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBWT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwt[_s32]", + "arguments": [ + "svint32_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBWT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwt[_s64]", + "arguments": [ + "svint64_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SSUBWT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwt[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBWT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwt[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBWT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svsubwt[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USUBWT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsudot[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint8_t op2", + "uint8_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USDOT" + ], + [ + "MOVPRFX", + "USDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsudot[_s32]", + "arguments": [ + "svint32_t op1", + "svint8_t op2", + "svuint8_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USDOT" + ], + [ + "MOVPRFX", + "USDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svsudot_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svint8_t op2", + "svuint8_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUDOT" + ], + [ + "MOVPRFX", + "SUDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbl2[_f16]", + "arguments": [ + "svfloat16x2_t data", + "svuint16_t indices" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "{Zdata0.H, Zdata1.H}" + }, + "indices": { + "register": "Zindices.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbl2[_f32]", + "arguments": [ + "svfloat32x2_t data", + "svuint32_t indices" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "{Zdata0.S, Zdata1.S}" + }, + "indices": { + "register": "Zindices.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbl2[_f64]", + "arguments": [ + "svfloat64x2_t data", + "svuint64_t indices" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "{Zdata0.D, Zdata1.D}" + }, + "indices": { + "register": "Zindices.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbl2[_s16]", + "arguments": [ + "svint16x2_t data", + "svuint16_t indices" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "{Zdata0.H, Zdata1.H}" + }, + "indices": { + "register": "Zindices.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbl2[_s32]", + "arguments": [ + "svint32x2_t data", + "svuint32_t indices" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "{Zdata0.S, Zdata1.S}" + }, + "indices": { + "register": "Zindices.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbl2[_s64]", + "arguments": [ + "svint64x2_t data", + "svuint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "{Zdata0.D, Zdata1.D}" + }, + "indices": { + "register": "Zindices.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbl2[_s8]", + "arguments": [ + "svint8x2_t data", + "svuint8_t indices" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "data": { + "register": "{Zdata0.B, Zdata1.B}" + }, + "indices": { + "register": "Zindices.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbl2[_u16]", + "arguments": [ + "svuint16x2_t data", + "svuint16_t indices" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "{Zdata0.H, Zdata1.H}" + }, + "indices": { + "register": "Zindices.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbl2[_u32]", + "arguments": [ + "svuint32x2_t data", + "svuint32_t indices" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "{Zdata0.S, Zdata1.S}" + }, + "indices": { + "register": "Zindices.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbl2[_u64]", + "arguments": [ + "svuint64x2_t data", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "{Zdata0.D, Zdata1.D}" + }, + "indices": { + "register": "Zindices.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbl2[_u8]", + "arguments": [ + "svuint8x2_t data", + "svuint8_t indices" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "data": { + "register": "{Zdata0.B, Zdata1.B}" + }, + "indices": { + "register": "Zindices.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtbl[_f16]", + "arguments": [ + "svfloat16_t data", + "svuint16_t indices" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.H" + }, + "indices": { + "register": "Zindices.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtbl[_f32]", + "arguments": [ + "svfloat32_t data", + "svuint32_t indices" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.S" + }, + "indices": { + "register": "Zindices.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtbl[_f64]", + "arguments": [ + "svfloat64_t data", + "svuint64_t indices" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D" + }, + "indices": { + "register": "Zindices.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtbl[_s16]", + "arguments": [ + "svint16_t data", + "svuint16_t indices" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.H" + }, + "indices": { + "register": "Zindices.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtbl[_s32]", + "arguments": [ + "svint32_t data", + "svuint32_t indices" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.S" + }, + "indices": { + "register": "Zindices.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtbl[_s64]", + "arguments": [ + "svint64_t data", + "svuint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D" + }, + "indices": { + "register": "Zindices.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtbl[_s8]", + "arguments": [ + "svint8_t data", + "svuint8_t indices" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.B" + }, + "indices": { + "register": "Zindices.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtbl[_u16]", + "arguments": [ + "svuint16_t data", + "svuint16_t indices" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.H" + }, + "indices": { + "register": "Zindices.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtbl[_u32]", + "arguments": [ + "svuint32_t data", + "svuint32_t indices" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.S" + }, + "indices": { + "register": "Zindices.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtbl[_u64]", + "arguments": [ + "svuint64_t data", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D" + }, + "indices": { + "register": "Zindices.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtbl[_u8]", + "arguments": [ + "svuint8_t data", + "svuint8_t indices" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.B" + }, + "indices": { + "register": "Zindices.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbx[_f16]", + "arguments": [ + "svfloat16_t fallback", + "svfloat16_t data", + "svuint16_t indices" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.H" + }, + "fallback": { + "register": "Ztied.H" + }, + "indices": { + "register": "Zindices.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbx[_f32]", + "arguments": [ + "svfloat32_t fallback", + "svfloat32_t data", + "svuint32_t indices" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.S" + }, + "fallback": { + "register": "Ztied.S" + }, + "indices": { + "register": "Zindices.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbx[_f64]", + "arguments": [ + "svfloat64_t fallback", + "svfloat64_t data", + "svuint64_t indices" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D" + }, + "fallback": { + "register": "Ztied.D" + }, + "indices": { + "register": "Zindices.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbx[_s16]", + "arguments": [ + "svint16_t fallback", + "svint16_t data", + "svuint16_t indices" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.H" + }, + "fallback": { + "register": "Ztied.H" + }, + "indices": { + "register": "Zindices.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbx[_s32]", + "arguments": [ + "svint32_t fallback", + "svint32_t data", + "svuint32_t indices" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.S" + }, + "fallback": { + "register": "Ztied.S" + }, + "indices": { + "register": "Zindices.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbx[_s64]", + "arguments": [ + "svint64_t fallback", + "svint64_t data", + "svuint64_t indices" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D" + }, + "fallback": { + "register": "Ztied.D" + }, + "indices": { + "register": "Zindices.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbx[_s8]", + "arguments": [ + "svint8_t fallback", + "svint8_t data", + "svuint8_t indices" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.B" + }, + "fallback": { + "register": "Ztied.B" + }, + "indices": { + "register": "Zindices.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbx[_u16]", + "arguments": [ + "svuint16_t fallback", + "svuint16_t data", + "svuint16_t indices" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.H" + }, + "fallback": { + "register": "Ztied.H" + }, + "indices": { + "register": "Zindices.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbx[_u32]", + "arguments": [ + "svuint32_t fallback", + "svuint32_t data", + "svuint32_t indices" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.S" + }, + "fallback": { + "register": "Ztied.S" + }, + "indices": { + "register": "Zindices.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbx[_u64]", + "arguments": [ + "svuint64_t fallback", + "svuint64_t data", + "svuint64_t indices" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.D" + }, + "fallback": { + "register": "Ztied.D" + }, + "indices": { + "register": "Zindices.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBX" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svtbx[_u8]", + "arguments": [ + "svuint8_t fallback", + "svuint8_t data", + "svuint8_t indices" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "data": { + "register": "Zdata.B" + }, + "fallback": { + "register": "Ztied.B" + }, + "indices": { + "register": "Zindices.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TBX" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtmad[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FTMAD" + ], + [ + "MOVPRFX", + "FTMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtmad[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FTMAD" + ], + [ + "MOVPRFX", + "FTMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtmad[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 0, + "maximum": 7 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FTMAD" + ], + [ + "MOVPRFX", + "FTMAD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1_b16", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.H" + }, + "op2": { + "register": "Pop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1_b32", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.S" + }, + "op2": { + "register": "Pop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1_b64", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.D" + }, + "op2": { + "register": "Pop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1_b8", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.B" + }, + "op2": { + "register": "Pop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1q[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1q[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1q[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1q[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1q[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1q[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1q[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1q[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1q[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1q[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn1q[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2_b16", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.H" + }, + "op2": { + "register": "Pop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2_b32", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.S" + }, + "op2": { + "register": "Pop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2_b64", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.D" + }, + "op2": { + "register": "Pop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2_b8", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.B" + }, + "op2": { + "register": "Pop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2q[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2q[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2q[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2q[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2q[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2q[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2q[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2q[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2q[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2q[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtrn2q[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "TRN2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtsmul[_f16]", + "arguments": [ + "svfloat16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FTSMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtsmul[_f32]", + "arguments": [ + "svfloat32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FTSMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtsmul[_f64]", + "arguments": [ + "svfloat64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FTSMUL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtssel[_f16]", + "arguments": [ + "svfloat16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FTSSEL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtssel[_f32]", + "arguments": [ + "svfloat32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FTSSEL" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svtssel[_f64]", + "arguments": [ + "svfloat64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FTSSEL" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svundef2_b", + "arguments": [], + "return_type": { + "value": "svboolx2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef2_f16", + "arguments": [], + "return_type": { + "value": "svfloat16x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef2_f32", + "arguments": [], + "return_type": { + "value": "svfloat32x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef2_f64", + "arguments": [], + "return_type": { + "value": "svfloat64x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef2_s16", + "arguments": [], + "return_type": { + "value": "svint16x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef2_s32", + "arguments": [], + "return_type": { + "value": "svint32x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef2_s64", + "arguments": [], + "return_type": { + "value": "svint64x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef2_s8", + "arguments": [], + "return_type": { + "value": "svint8x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef2_u16", + "arguments": [], + "return_type": { + "value": "svuint16x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef2_u32", + "arguments": [], + "return_type": { + "value": "svuint32x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef2_u64", + "arguments": [], + "return_type": { + "value": "svuint64x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef2_u8", + "arguments": [], + "return_type": { + "value": "svuint8x2_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef3_f16", + "arguments": [], + "return_type": { + "value": "svfloat16x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef3_f32", + "arguments": [], + "return_type": { + "value": "svfloat32x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef3_f64", + "arguments": [], + "return_type": { + "value": "svfloat64x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef3_s16", + "arguments": [], + "return_type": { + "value": "svint16x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef3_s32", + "arguments": [], + "return_type": { + "value": "svint32x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef3_s64", + "arguments": [], + "return_type": { + "value": "svint64x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef3_s8", + "arguments": [], + "return_type": { + "value": "svint8x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef3_u16", + "arguments": [], + "return_type": { + "value": "svuint16x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef3_u32", + "arguments": [], + "return_type": { + "value": "svuint32x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef3_u64", + "arguments": [], + "return_type": { + "value": "svuint64x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef3_u8", + "arguments": [], + "return_type": { + "value": "svuint8x3_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svundef4_b", + "arguments": [], + "return_type": { + "value": "svboolx4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef4_f16", + "arguments": [], + "return_type": { + "value": "svfloat16x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef4_f32", + "arguments": [], + "return_type": { + "value": "svfloat32x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef4_f64", + "arguments": [], + "return_type": { + "value": "svfloat64x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef4_s16", + "arguments": [], + "return_type": { + "value": "svint16x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef4_s32", + "arguments": [], + "return_type": { + "value": "svint32x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef4_s64", + "arguments": [], + "return_type": { + "value": "svint64x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef4_s8", + "arguments": [], + "return_type": { + "value": "svint8x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef4_u16", + "arguments": [], + "return_type": { + "value": "svuint16x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef4_u32", + "arguments": [], + "return_type": { + "value": "svuint32x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef4_u64", + "arguments": [], + "return_type": { + "value": "svuint64x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef4_u8", + "arguments": [], + "return_type": { + "value": "svuint8x4_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef_f16", + "arguments": [], + "return_type": { + "value": "svfloat16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef_f32", + "arguments": [], + "return_type": { + "value": "svfloat32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef_f64", + "arguments": [], + "return_type": { + "value": "svfloat64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef_s16", + "arguments": [], + "return_type": { + "value": "svint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef_s32", + "arguments": [], + "return_type": { + "value": "svint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef_s64", + "arguments": [], + "return_type": { + "value": "svint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef_s8", + "arguments": [], + "return_type": { + "value": "svint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef_u16", + "arguments": [], + "return_type": { + "value": "svuint16_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef_u32", + "arguments": [], + "return_type": { + "value": "svuint32_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef_u64", + "arguments": [], + "return_type": { + "value": "svuint64_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svundef_u8", + "arguments": [], + "return_type": { + "value": "svuint8_t" + }, + "Architectures": [ + "A64" + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svunpkhi[_b]", + "arguments": [ + "svbool_t op" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Pop.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PUNPKHI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svunpkhi[_s16]", + "arguments": [ + "svint8_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUNPKHI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svunpkhi[_s32]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUNPKHI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svunpkhi[_s64]", + "arguments": [ + "svint32_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUNPKHI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svunpkhi[_u16]", + "arguments": [ + "svuint8_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UUNPKHI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svunpkhi[_u32]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UUNPKHI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svunpkhi[_u64]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UUNPKHI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svunpklo[_b]", + "arguments": [ + "svbool_t op" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Pop.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "PUNPKLO" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svunpklo[_s16]", + "arguments": [ + "svint8_t op" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUNPKLO" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svunpklo[_s32]", + "arguments": [ + "svint16_t op" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUNPKLO" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svunpklo[_s64]", + "arguments": [ + "svint32_t op" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUNPKLO" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svunpklo[_u16]", + "arguments": [ + "svuint8_t op" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UUNPKLO" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svunpklo[_u32]", + "arguments": [ + "svuint16_t op" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UUNPKLO" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svunpklo[_u64]", + "arguments": [ + "svuint32_t op" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op": { + "register": "Zop.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UUNPKLO" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_n_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ], + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_n_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_n_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "uint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H[*]" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_n_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ], + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_n_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_n_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S[*]" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_n_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ], + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_n_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_n_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D[*]" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_n_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ], + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_n_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Ztied1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SQADD" + ], + [ + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_n_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "uint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B[*]" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_s16]_m", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ], + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_s16]_x", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ], + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_s16]_z", + "arguments": [ + "svbool_t pg", + "svint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + }, + "pg": { + "register": "Pg.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_s32]_m", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ], + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_s32]_x", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ], + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_s32]_z", + "arguments": [ + "svbool_t pg", + "svint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + }, + "pg": { + "register": "Pg.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_s64]_m", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ], + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_s64]_x", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ], + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_s64]_z", + "arguments": [ + "svbool_t pg", + "svint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + }, + "pg": { + "register": "Pg.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_s8]_m", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ], + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_s8]_x", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "SUQADD" + ], + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svuqadd[_s8]_z", + "arguments": [ + "svbool_t pg", + "svint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + }, + "pg": { + "register": "Pg.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "MOVPRFX", + "SUQADD" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svusdot[_n_s32]", + "arguments": [ + "svint32_t op1", + "svuint8_t op2", + "int8_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B[*]" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USDOT" + ], + [ + "MOVPRFX", + "USDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svusdot[_s32]", + "arguments": [ + "svint32_t op1", + "svuint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USDOT" + ], + [ + "MOVPRFX", + "USDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svusdot_lane[_s32]", + "arguments": [ + "svint32_t op1", + "svuint8_t op2", + "svint8_t op3", + "uint64_t imm_index" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm_index": { + "minimum": 0, + "maximum": 3 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USDOT" + ], + [ + "MOVPRFX", + "USDOT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svusmmla[_s32]", + "arguments": [ + "svint32_t op1", + "svuint8_t op2", + "svint8_t op3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.B" + }, + "op3": { + "register": "Zop3.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "USMMLA" + ], + [ + "MOVPRFX", + "USMMLA" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1_b16", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.H" + }, + "op2": { + "register": "Pop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1_b32", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.S" + }, + "op2": { + "register": "Pop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1_b64", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.D" + }, + "op2": { + "register": "Pop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1_b8", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.B" + }, + "op2": { + "register": "Pop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1q[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1q[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1q[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1q[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1q[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1q[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1q[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1q[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1q[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1q[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp1q[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2_b16", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.H" + }, + "op2": { + "register": "Pop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2_b32", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.S" + }, + "op2": { + "register": "Pop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2_b64", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.D" + }, + "op2": { + "register": "Pop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2_b8", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.B" + }, + "op2": { + "register": "Pop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2q[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2q[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2q[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2q[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2q[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2q[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2q[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2q[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2q[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2q[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svuzp2q[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "UZP2" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b16[_s32]", + "arguments": [ + "int32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b16[_s64]", + "arguments": [ + "int64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b16[_s64]_x2", + "arguments": [ + "int64_t rn", + "int64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b16[_u32]", + "arguments": [ + "uint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b16[_u64]", + "arguments": [ + "uint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b16[_u64]_x2", + "arguments": [ + "uint64_t rn", + "uint64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b32[_s32]", + "arguments": [ + "int32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b32[_s64]", + "arguments": [ + "int64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b32[_s64]_x2", + "arguments": [ + "int64_t rn", + "int64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b32[_u32]", + "arguments": [ + "uint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b32[_u64]", + "arguments": [ + "uint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b32[_u64]_x2", + "arguments": [ + "uint64_t rn", + "uint64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b64[_s32]", + "arguments": [ + "int32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b64[_s64]", + "arguments": [ + "int64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b64[_s64]_x2", + "arguments": [ + "int64_t rn", + "int64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b64[_u32]", + "arguments": [ + "uint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b64[_u64]", + "arguments": [ + "uint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b64[_u64]_x2", + "arguments": [ + "uint64_t rn", + "uint64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b8[_s32]", + "arguments": [ + "int32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b8[_s64]", + "arguments": [ + "int64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b8[_s64]_x2", + "arguments": [ + "int64_t rn", + "int64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b8[_u32]", + "arguments": [ + "uint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b8[_u64]", + "arguments": [ + "uint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_b8[_u64]_x2", + "arguments": [ + "uint64_t rn", + "uint64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_c16[_s64]", + "arguments": [ + "int64_t rn", + "int64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_c16[_u64]", + "arguments": [ + "uint64_t rn", + "uint64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_c32[_s64]", + "arguments": [ + "int64_t rn", + "int64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_c32[_u64]", + "arguments": [ + "uint64_t rn", + "uint64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_c64[_s64]", + "arguments": [ + "int64_t rn", + "int64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_c64[_u64]", + "arguments": [ + "uint64_t rn", + "uint64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_c8[_s64]", + "arguments": [ + "int64_t rn", + "int64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilege_c8[_u64]", + "arguments": [ + "uint64_t rn", + "uint64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b16[_s32]", + "arguments": [ + "int32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b16[_s64]", + "arguments": [ + "int64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b16[_s64]_x2", + "arguments": [ + "int64_t rn", + "int64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b16[_u32]", + "arguments": [ + "uint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b16[_u64]", + "arguments": [ + "uint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b16[_u64]_x2", + "arguments": [ + "uint64_t rn", + "uint64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b32[_s32]", + "arguments": [ + "int32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b32[_s64]", + "arguments": [ + "int64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b32[_s64]_x2", + "arguments": [ + "int64_t rn", + "int64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b32[_u32]", + "arguments": [ + "uint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b32[_u64]", + "arguments": [ + "uint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b32[_u64]_x2", + "arguments": [ + "uint64_t rn", + "uint64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b64[_s32]", + "arguments": [ + "int32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b64[_s64]", + "arguments": [ + "int64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b64[_s64]_x2", + "arguments": [ + "int64_t rn", + "int64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b64[_u32]", + "arguments": [ + "uint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b64[_u64]", + "arguments": [ + "uint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b64[_u64]_x2", + "arguments": [ + "uint64_t rn", + "uint64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b8[_s32]", + "arguments": [ + "int32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b8[_s64]", + "arguments": [ + "int64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b8[_s64]_x2", + "arguments": [ + "int64_t rn", + "int64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b8[_u32]", + "arguments": [ + "uint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b8[_u64]", + "arguments": [ + "uint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_b8[_u64]_x2", + "arguments": [ + "uint64_t rn", + "uint64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_c16[_s64]", + "arguments": [ + "int64_t rn", + "int64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_c16[_u64]", + "arguments": [ + "uint64_t rn", + "uint64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_c32[_s64]", + "arguments": [ + "int64_t rn", + "int64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_c32[_u64]", + "arguments": [ + "uint64_t rn", + "uint64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_c64[_s64]", + "arguments": [ + "int64_t rn", + "int64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_c64[_u64]", + "arguments": [ + "uint64_t rn", + "uint64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHI" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_c8[_s64]", + "arguments": [ + "int64_t rn", + "int64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEGT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilegt_c8[_u64]", + "arguments": [ + "uint64_t rn", + "uint64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEHI" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilele_b16[_s32]", + "arguments": [ + "int32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilele_b16[_s64]", + "arguments": [ + "int64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilele_b16[_s64]_x2", + "arguments": [ + "int64_t rn", + "int64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilele_b16[_u32]", + "arguments": [ + "uint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilele_b16[_u64]", + "arguments": [ + "uint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilele_b16[_u64]_x2", + "arguments": [ + "uint64_t rn", + "uint64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilele_b32[_s32]", + "arguments": [ + "int32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilele_b32[_s64]", + "arguments": [ + "int64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilele_b32[_s64]_x2", + "arguments": [ + "int64_t rn", + "int64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilele_b32[_u32]", + "arguments": [ + "uint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilele_b32[_u64]", + "arguments": [ + "uint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilele_b32[_u64]_x2", + "arguments": [ + "uint64_t rn", + "uint64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilele_b64[_s32]", + "arguments": [ + "int32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilele_b64[_s64]", + "arguments": [ + "int64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilele_b64[_s64]_x2", + "arguments": [ + "int64_t rn", + "int64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilele_b64[_u32]", + "arguments": [ + "uint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilele_b64[_u64]", + "arguments": [ + "uint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilele_b64[_u64]_x2", + "arguments": [ + "uint64_t rn", + "uint64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilele_b8[_s32]", + "arguments": [ + "int32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilele_b8[_s64]", + "arguments": [ + "int64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilele_b8[_s64]_x2", + "arguments": [ + "int64_t rn", + "int64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELE" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilele_b8[_u32]", + "arguments": [ + "uint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilele_b8[_u64]", + "arguments": [ + "uint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilele_b8[_u64]_x2", + "arguments": [ + "uint64_t rn", + "uint64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilele_c16[_s64]", + "arguments": [ + "int64_t rn", + "int64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilele_c16[_u64]", + "arguments": [ + "uint64_t rn", + "uint64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilele_c32[_s64]", + "arguments": [ + "int64_t rn", + "int64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilele_c32[_u64]", + "arguments": [ + "uint64_t rn", + "uint64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilele_c64[_s64]", + "arguments": [ + "int64_t rn", + "int64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilele_c64[_u64]", + "arguments": [ + "uint64_t rn", + "uint64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELS" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilele_c8[_s64]", + "arguments": [ + "int64_t rn", + "int64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELE" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilele_c8[_u64]", + "arguments": [ + "uint64_t rn", + "uint64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELS" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilelt_b16[_s32]", + "arguments": [ + "int32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilelt_b16[_s64]", + "arguments": [ + "int64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilelt_b16[_s64]_x2", + "arguments": [ + "int64_t rn", + "int64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilelt_b16[_u32]", + "arguments": [ + "uint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELO" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilelt_b16[_u64]", + "arguments": [ + "uint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELO" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilelt_b16[_u64]_x2", + "arguments": [ + "uint64_t rn", + "uint64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELO" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilelt_b32[_s32]", + "arguments": [ + "int32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilelt_b32[_s64]", + "arguments": [ + "int64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilelt_b32[_s64]_x2", + "arguments": [ + "int64_t rn", + "int64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilelt_b32[_u32]", + "arguments": [ + "uint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELO" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilelt_b32[_u64]", + "arguments": [ + "uint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELO" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilelt_b32[_u64]_x2", + "arguments": [ + "uint64_t rn", + "uint64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELO" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilelt_b64[_s32]", + "arguments": [ + "int32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilelt_b64[_s64]", + "arguments": [ + "int64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilelt_b64[_s64]_x2", + "arguments": [ + "int64_t rn", + "int64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilelt_b64[_u32]", + "arguments": [ + "uint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELO" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilelt_b64[_u64]", + "arguments": [ + "uint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELO" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilelt_b64[_u64]_x2", + "arguments": [ + "uint64_t rn", + "uint64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELO" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilelt_b8[_s32]", + "arguments": [ + "int32_t op1", + "int32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilelt_b8[_s64]", + "arguments": [ + "int64_t op1", + "int64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilelt_b8[_s64]_x2", + "arguments": [ + "int64_t rn", + "int64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELT" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilelt_b8[_u32]", + "arguments": [ + "uint32_t op1", + "uint32_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Wop1" + }, + "op2": { + "register": "Wop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELO" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwhilelt_b8[_u64]", + "arguments": [ + "uint64_t op1", + "uint64_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELO" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilelt_b8[_u64]_x2", + "arguments": [ + "uint64_t rn", + "uint64_t rm" + ], + "return_type": { + "value": "svboolx2_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELO" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilelt_c16[_s64]", + "arguments": [ + "int64_t rn", + "int64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilelt_c16[_u64]", + "arguments": [ + "uint64_t rn", + "uint64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELO" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilelt_c32[_s64]", + "arguments": [ + "int64_t rn", + "int64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilelt_c32[_u64]", + "arguments": [ + "uint64_t rn", + "uint64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELO" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilelt_c64[_s64]", + "arguments": [ + "int64_t rn", + "int64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilelt_c64[_u64]", + "arguments": [ + "uint64_t rn", + "uint64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELO" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilelt_c8[_s64]", + "arguments": [ + "int64_t rn", + "int64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELT" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilelt_c8[_u64]", + "arguments": [ + "uint64_t rn", + "uint64_t rm", + "uint64_t vl" + ], + "return_type": { + "value": "svcount_t" + }, + "Arguments_Preparation": { + "rm": { + "register": "Xreg2" + }, + "rn": { + "register": "Xreg1" + }, + "vl": { + "immediate": "" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILELO" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilerw[_f16]", + "arguments": [ + "const float16_t *op1", + "const float16_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILERW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilerw[_f32]", + "arguments": [ + "const float32_t *op1", + "const float32_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILERW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilerw[_f64]", + "arguments": [ + "const float64_t *op1", + "const float64_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILERW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilerw[_s16]", + "arguments": [ + "const int16_t *op1", + "const int16_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILERW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilerw[_s32]", + "arguments": [ + "const int32_t *op1", + "const int32_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILERW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilerw[_s64]", + "arguments": [ + "const int64_t *op1", + "const int64_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILERW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilerw[_s8]", + "arguments": [ + "const int8_t *op1", + "const int8_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILERW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilerw[_u16]", + "arguments": [ + "const uint16_t *op1", + "const uint16_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILERW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilerw[_u32]", + "arguments": [ + "const uint32_t *op1", + "const uint32_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILERW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilerw[_u64]", + "arguments": [ + "const uint64_t *op1", + "const uint64_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILERW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilerw[_u8]", + "arguments": [ + "const uint8_t *op1", + "const uint8_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILERW" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilewr[_f16]", + "arguments": [ + "const float16_t *op1", + "const float16_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEWR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilewr[_f32]", + "arguments": [ + "const float32_t *op1", + "const float32_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEWR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilewr[_f64]", + "arguments": [ + "const float64_t *op1", + "const float64_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEWR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilewr[_s16]", + "arguments": [ + "const int16_t *op1", + "const int16_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEWR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilewr[_s32]", + "arguments": [ + "const int32_t *op1", + "const int32_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEWR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilewr[_s64]", + "arguments": [ + "const int64_t *op1", + "const int64_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEWR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilewr[_s8]", + "arguments": [ + "const int8_t *op1", + "const int8_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEWR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilewr[_u16]", + "arguments": [ + "const uint16_t *op1", + "const uint16_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEWR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilewr[_u32]", + "arguments": [ + "const uint32_t *op1", + "const uint32_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEWR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilewr[_u64]", + "arguments": [ + "const uint64_t *op1", + "const uint64_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEWR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svwhilewr[_u8]", + "arguments": [ + "const uint8_t *op1", + "const uint8_t *op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Xop1" + }, + "op2": { + "register": "Xop2" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WHILEWR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svwrffr", + "arguments": [ + "svbool_t op" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "op": { + "register": "Pop.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "WRFFR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svxar[_n_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "XAR" + ], + [ + "XAR" + ], + [ + "MOVPRFX", + "XAR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svxar[_n_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "XAR" + ], + [ + "XAR" + ], + [ + "MOVPRFX", + "XAR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svxar[_n_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 64 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "XAR" + ], + [ + "XAR" + ], + [ + "MOVPRFX", + "XAR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svxar[_n_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "XAR" + ], + [ + "XAR" + ], + [ + "MOVPRFX", + "XAR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svxar[_n_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 16 + }, + "op1": { + "register": "Zop1.H|Ztied1.H" + }, + "op2": { + "register": "Zop2.H|Ztied2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "XAR" + ], + [ + "XAR" + ], + [ + "MOVPRFX", + "XAR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svxar[_n_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 32 + }, + "op1": { + "register": "Zop1.S|Ztied1.S" + }, + "op2": { + "register": "Zop2.S|Ztied2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "XAR" + ], + [ + "XAR" + ], + [ + "MOVPRFX", + "XAR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svxar[_n_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 64 + }, + "op1": { + "register": "Zop1.D|Ztied1.D" + }, + "op2": { + "register": "Zop2.D|Ztied2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "XAR" + ], + [ + "XAR" + ], + [ + "MOVPRFX", + "XAR" + ] + ] + }, + { + "SIMD_ISA": "SVE2", + "name": "svxar[_n_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2", + "uint64_t imm3" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "imm3": { + "minimum": 1, + "maximum": 8 + }, + "op1": { + "register": "Zop1.B|Ztied1.B" + }, + "op2": { + "register": "Zop2.B|Ztied2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "XAR" + ], + [ + "XAR" + ], + [ + "MOVPRFX", + "XAR" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1_b16", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.H" + }, + "op2": { + "register": "Pop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1_b32", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.S" + }, + "op2": { + "register": "Pop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1_b64", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.D" + }, + "op2": { + "register": "Pop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1_b8", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.B" + }, + "op2": { + "register": "Pop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1q[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1q[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1q[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1q[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1q[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1q[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1q[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1q[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1q[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1q[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip1q[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP1" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.H" + }, + "op2": { + "register": "Zop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.S" + }, + "op2": { + "register": "Zop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.D" + }, + "op2": { + "register": "Zop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.B" + }, + "op2": { + "register": "Zop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2_b16", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.H" + }, + "op2": { + "register": "Pop2.H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2_b32", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.S" + }, + "op2": { + "register": "Pop2.S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2_b64", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.D" + }, + "op2": { + "register": "Pop2.D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2_b8", + "arguments": [ + "svbool_t op1", + "svbool_t op2" + ], + "return_type": { + "value": "svbool_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Pop1.B" + }, + "op2": { + "register": "Pop2.B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2q[_f16]", + "arguments": [ + "svfloat16_t op1", + "svfloat16_t op2" + ], + "return_type": { + "value": "svfloat16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2q[_f32]", + "arguments": [ + "svfloat32_t op1", + "svfloat32_t op2" + ], + "return_type": { + "value": "svfloat32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2q[_f64]", + "arguments": [ + "svfloat64_t op1", + "svfloat64_t op2" + ], + "return_type": { + "value": "svfloat64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2q[_s16]", + "arguments": [ + "svint16_t op1", + "svint16_t op2" + ], + "return_type": { + "value": "svint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2q[_s32]", + "arguments": [ + "svint32_t op1", + "svint32_t op2" + ], + "return_type": { + "value": "svint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2q[_s64]", + "arguments": [ + "svint64_t op1", + "svint64_t op2" + ], + "return_type": { + "value": "svint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2q[_s8]", + "arguments": [ + "svint8_t op1", + "svint8_t op2" + ], + "return_type": { + "value": "svint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2q[_u16]", + "arguments": [ + "svuint16_t op1", + "svuint16_t op2" + ], + "return_type": { + "value": "svuint16_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2q[_u32]", + "arguments": [ + "svuint32_t op1", + "svuint32_t op2" + ], + "return_type": { + "value": "svuint32_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2q[_u64]", + "arguments": [ + "svuint64_t op1", + "svuint64_t op2" + ], + "return_type": { + "value": "svuint64_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" + ] + ] + }, + { + "SIMD_ISA": "SVE", + "name": "svzip2q[_u8]", + "arguments": [ + "svuint8_t op1", + "svuint8_t op2" + ], + "return_type": { + "value": "svuint8_t" + }, + "Arguments_Preparation": { + "op1": { + "register": "Zop1.Q" + }, + "op2": { + "register": "Zop2.Q" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ZIP2" ] ] }, @@ -4404,7 +207121,7 @@ }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vm.2S" } }, "Architectures": [ @@ -4473,7 +207190,7 @@ }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vm.2S" } }, "Architectures": [ @@ -4519,7 +207236,7 @@ }, "Arguments_Preparation": { "a": { - "register": "Vn.4S" + "register": "Vm.4S" } }, "Architectures": [ @@ -5179,6 +207896,276 @@ ] ] }, + { + "SIMD_ISA": "Neon", + "name": "vamax_f16", + "arguments": [ + "float16x4_t vn", + "float16x4_t vm" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "vm": { + "register": "Vm.4H" + }, + "vn": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FAMAX" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vamax_f32", + "arguments": [ + "float32x2_t vn", + "float32x2_t vm" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "vm": { + "register": "Vm.2S" + }, + "vn": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FAMAX" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vamaxq_f16", + "arguments": [ + "float16x8_t vn", + "float16x8_t vm" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "vm": { + "register": "Vm.8H" + }, + "vn": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FAMAX" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vamaxq_f32", + "arguments": [ + "float32x4_t vn", + "float32x4_t vm" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "vm": { + "register": "Vm.4S" + }, + "vn": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FAMAX" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vamaxq_f64", + "arguments": [ + "float64x2_t vn", + "float64x2_t vm" + ], + "return_type": { + "value": "float64x2_t" + }, + "Arguments_Preparation": { + "vm": { + "register": "Vm.2D" + }, + "vn": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FAMAX" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vamin_f16", + "arguments": [ + "float16x4_t vn", + "float16x4_t vm" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "vm": { + "register": "Vm.4H" + }, + "vn": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FAMIN" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vamin_f32", + "arguments": [ + "float32x2_t vn", + "float32x2_t vm" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "vm": { + "register": "Vm.2S" + }, + "vn": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FAMIN" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaminq_f16", + "arguments": [ + "float16x8_t vn", + "float16x8_t vm" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "vm": { + "register": "Vm.8H" + }, + "vn": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FAMIN" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaminq_f32", + "arguments": [ + "float32x4_t vn", + "float32x4_t vm" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "vm": { + "register": "Vm.4S" + }, + "vn": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FAMIN" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vaminq_f64", + "arguments": [ + "float64x2_t vn", + "float64x2_t vm" + ], + "return_type": { + "value": "float64x2_t" + }, + "Arguments_Preparation": { + "vm": { + "register": "Vm.2D" + }, + "vn": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FAMIN" + ] + ] + }, { "SIMD_ISA": "Neon", "name": "vand_s16", @@ -5658,8 +208645,12 @@ "a": { "register": "Vn.16B" }, - "b": {}, - "c": {} + "b": { + "register": "Vm.16B" + }, + "c": { + "register": "Va.16B" + } }, "Architectures": [ "A64" @@ -5685,8 +208676,12 @@ "a": { "register": "Vn.16B" }, - "b": {}, - "c": {} + "b": { + "register": "Vm.16B" + }, + "c": { + "register": "Va.16B" + } }, "Architectures": [ "A64" @@ -5712,8 +208707,12 @@ "a": { "register": "Vn.16B" }, - "b": {}, - "c": {} + "b": { + "register": "Vm.16B" + }, + "c": { + "register": "Va.16B" + } }, "Architectures": [ "A64" @@ -5739,8 +208738,12 @@ "a": { "register": "Vn.16B" }, - "b": {}, - "c": {} + "b": { + "register": "Vm.16B" + }, + "c": { + "register": "Va.16B" + } }, "Architectures": [ "A64" @@ -5766,8 +208769,12 @@ "a": { "register": "Vn.16B" }, - "b": {}, - "c": {} + "b": { + "register": "Vm.16B" + }, + "c": { + "register": "Va.16B" + } }, "Architectures": [ "A64" @@ -5793,8 +208800,12 @@ "a": { "register": "Vn.16B" }, - "b": {}, - "c": {} + "b": { + "register": "Vm.16B" + }, + "c": { + "register": "Va.16B" + } }, "Architectures": [ "A64" @@ -5820,8 +208831,12 @@ "a": { "register": "Vn.16B" }, - "b": {}, - "c": {} + "b": { + "register": "Vm.16B" + }, + "c": { + "register": "Va.16B" + } }, "Architectures": [ "A64" @@ -5847,8 +208862,12 @@ "a": { "register": "Vn.16B" }, - "b": {}, - "c": {} + "b": { + "register": "Vm.16B" + }, + "c": { + "register": "Va.16B" + } }, "Architectures": [ "A64" @@ -7253,7 +210272,7 @@ }, "Arguments_Preparation": { "a": { - "register": "Vn.4H " + "register": "Vn.4H" }, "b": { "register": "Vm.4H" @@ -7281,7 +210300,7 @@ }, "Arguments_Preparation": { "a": { - "register": "Vn.2S " + "register": "Vn.2S" }, "b": { "register": "Vm.2S" @@ -7309,7 +210328,7 @@ }, "Arguments_Preparation": { "a": { - "register": "Vn.4H " + "register": "Vn.4H" }, "b": { "register": "Vm.4H" @@ -7337,7 +210356,7 @@ }, "Arguments_Preparation": { "a": { - "register": "Vn.2S " + "register": "Vn.2S" }, "b": { "register": "Vm.2S" @@ -7365,7 +210384,7 @@ }, "Arguments_Preparation": { "a": { - "register": "Vn.8H " + "register": "Vn.8H" }, "b": { "register": "Vm.8H" @@ -7393,7 +210412,7 @@ }, "Arguments_Preparation": { "a": { - "register": "Vn.4S " + "register": "Vn.4S" }, "b": { "register": "Vm.4S" @@ -7421,7 +210440,7 @@ }, "Arguments_Preparation": { "a": { - "register": "Vn.2D " + "register": "Vn.2D" }, "b": { "register": "Vm.2D" @@ -7448,7 +210467,7 @@ }, "Arguments_Preparation": { "a": { - "register": "Vn.8H " + "register": "Vn.8H" }, "b": { "register": "Vm.8H" @@ -7476,7 +210495,7 @@ }, "Arguments_Preparation": { "a": { - "register": "Vn.4S " + "register": "Vn.4S" }, "b": { "register": "Vm.4S" @@ -7504,7 +210523,7 @@ }, "Arguments_Preparation": { "a": { - "register": "Vn.2D " + "register": "Vn.2D" }, "b": { "register": "Vm.2D" @@ -15417,8 +218436,12 @@ "value": "float16x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + }, "r": { "register": "Vd.4H" } @@ -15445,8 +218468,12 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + }, "r": { "register": "Vd.2S" } @@ -15474,8 +218501,12 @@ "value": "float16x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 1 @@ -15507,8 +218538,12 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.S" + }, "lane": { "minimum": 0, "maximum": 0 @@ -15540,8 +218575,12 @@ "value": "float16x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 1 @@ -15573,8 +218612,12 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.S" + }, "lane": { "minimum": 0, "maximum": 1 @@ -15590,7 +218633,7 @@ "instructions": [ [ "DUP", - "FCMLA" + "" ] ] }, @@ -15606,8 +218649,12 @@ "value": "float16x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + }, "r": { "register": "Vd.4H" } @@ -15634,8 +218681,12 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + }, "r": { "register": "Vd.2S" } @@ -15663,8 +218714,12 @@ "value": "float16x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 1 @@ -15696,8 +218751,12 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.S" + }, "lane": { "minimum": 0, "maximum": 0 @@ -15729,8 +218788,12 @@ "value": "float16x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 1 @@ -15762,8 +218825,12 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.S" + }, "lane": { "minimum": 0, "maximum": 1 @@ -15779,7 +218846,7 @@ "instructions": [ [ "DUP", - "FCMLA" + "" ] ] }, @@ -15795,8 +218862,12 @@ "value": "float16x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + }, "r": { "register": "Vd.4H" } @@ -15823,8 +218894,12 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + }, "r": { "register": "Vd.2S" } @@ -15852,8 +218927,12 @@ "value": "float16x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 1 @@ -15885,8 +218964,12 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.S" + }, "lane": { "minimum": 0, "maximum": 0 @@ -15918,8 +219001,12 @@ "value": "float16x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 1 @@ -15951,8 +219038,12 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.S" + }, "lane": { "minimum": 0, "maximum": 1 @@ -15968,7 +219059,7 @@ "instructions": [ [ "DUP", - "FCMLA" + "" ] ] }, @@ -15984,8 +219075,12 @@ "value": "float16x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.4H" + }, "r": { "register": "Vd.4H" } @@ -16012,8 +219107,12 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.2S" + }, "r": { "register": "Vd.2S" } @@ -16041,8 +219140,12 @@ "value": "float16x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 1 @@ -16074,8 +219177,12 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.S" + }, "lane": { "minimum": 0, "maximum": 0 @@ -16107,8 +219214,12 @@ "value": "float16x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vn.4H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 1 @@ -16140,8 +219251,12 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vn.2S" + }, + "b": { + "register": "Vm.S" + }, "lane": { "minimum": 0, "maximum": 1 @@ -16157,7 +219272,7 @@ "instructions": [ [ "DUP", - "FCMLA" + "" ] ] }, @@ -16173,8 +219288,12 @@ "value": "float16x8_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + }, "r": { "register": "Vd.8H" } @@ -16201,8 +219320,12 @@ "value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + }, "r": { "register": "Vd.4S" } @@ -16229,8 +219352,12 @@ "value": "float64x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + }, "r": { "register": "Vd.2D" } @@ -16257,8 +219384,12 @@ "value": "float16x8_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 1 @@ -16290,8 +219421,12 @@ "value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.S" + }, "lane": { "minimum": 0, "maximum": 0 @@ -16323,8 +219458,12 @@ "value": "float16x8_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 3 @@ -16356,8 +219495,12 @@ "value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.S" + }, "lane": { "minimum": 0, "maximum": 1 @@ -16388,8 +219531,12 @@ "value": "float16x8_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + }, "r": { "register": "Vd.8H" } @@ -16416,8 +219563,12 @@ "value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + }, "r": { "register": "Vd.4S" } @@ -16444,8 +219595,12 @@ "value": "float64x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + }, "r": { "register": "Vd.2D" } @@ -16472,8 +219627,12 @@ "value": "float16x8_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 1 @@ -16505,8 +219664,12 @@ "value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.S" + }, "lane": { "minimum": 0, "maximum": 0 @@ -16538,8 +219701,12 @@ "value": "float16x8_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 3 @@ -16571,8 +219738,12 @@ "value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.S" + }, "lane": { "minimum": 0, "maximum": 1 @@ -16603,8 +219774,12 @@ "value": "float16x8_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + }, "r": { "register": "Vd.8H" } @@ -16631,8 +219806,12 @@ "value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + }, "r": { "register": "Vd.4S" } @@ -16659,8 +219838,12 @@ "value": "float64x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + }, "r": { "register": "Vd.2D" } @@ -16687,8 +219870,12 @@ "value": "float16x8_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 1 @@ -16720,8 +219907,12 @@ "value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.S" + }, "lane": { "minimum": 0, "maximum": 0 @@ -16753,8 +219944,12 @@ "value": "float16x8_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 3 @@ -16786,8 +219981,12 @@ "value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.S" + }, "lane": { "minimum": 0, "maximum": 1 @@ -16818,8 +220017,12 @@ "value": "float16x8_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.8H" + }, "r": { "register": "Vd.8H" } @@ -16846,8 +220049,12 @@ "value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.4S" + }, "r": { "register": "Vd.4S" } @@ -16874,8 +220081,12 @@ "value": "float64x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vn.2D" + }, + "b": { + "register": "Vm.2D" + }, "r": { "register": "Vd.2D" } @@ -16902,8 +220113,12 @@ "value": "float16x8_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 1 @@ -16935,8 +220150,12 @@ "value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.S" + }, "lane": { "minimum": 0, "maximum": 0 @@ -16968,8 +220187,12 @@ "value": "float16x8_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vn.8H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 3 @@ -17001,8 +220224,12 @@ "value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vn.4S" + }, + "b": { + "register": "Vm.S" + }, "lane": { "minimum": 0, "maximum": 1 @@ -20921,7 +224148,6 @@ } }, "Architectures": [ - "A32", "A64" ], "instructions": [ @@ -20991,7 +224217,6 @@ } }, "Architectures": [ - "A32", "A64" ], "instructions": [ @@ -21933,7 +225158,6 @@ } }, "Architectures": [ - "A32", "A64" ], "instructions": [ @@ -22003,7 +225227,6 @@ } }, "Architectures": [ - "A32", "A64" ], "instructions": [ @@ -22261,7 +225484,6 @@ } }, "Architectures": [ - "A32", "A64" ], "instructions": [ @@ -22331,7 +225553,6 @@ } }, "Architectures": [ - "A32", "A64" ], "instructions": [ @@ -22777,7 +225998,6 @@ } }, "Architectures": [ - "A32", "A64" ], "instructions": [ @@ -22847,7 +226067,6 @@ } }, "Architectures": [ - "A32", "A64" ], "instructions": [ @@ -23293,7 +226512,6 @@ } }, "Architectures": [ - "A32", "A64" ], "instructions": [ @@ -23363,7 +226581,6 @@ } }, "Architectures": [ - "A32", "A64" ], "instructions": [ @@ -28161,8 +231378,12 @@ "a": { "register": "Vn.16B" }, - "b": {}, - "c": {} + "b": { + "register": "Vm.16B" + }, + "c": { + "register": "Va.16B" + } }, "Architectures": [ "A64" @@ -28188,8 +231409,12 @@ "a": { "register": "Vn.16B" }, - "b": {}, - "c": {} + "b": { + "register": "Vm.16B" + }, + "c": { + "register": "Va.16B" + } }, "Architectures": [ "A64" @@ -28215,8 +231440,12 @@ "a": { "register": "Vn.16B" }, - "b": {}, - "c": {} + "b": { + "register": "Vm.16B" + }, + "c": { + "register": "Va.16B" + } }, "Architectures": [ "A64" @@ -28242,8 +231471,12 @@ "a": { "register": "Vn.16B" }, - "b": {}, - "c": {} + "b": { + "register": "Vm.16B" + }, + "c": { + "register": "Va.16B" + } }, "Architectures": [ "A64" @@ -28269,8 +231502,12 @@ "a": { "register": "Vn.16B" }, - "b": {}, - "c": {} + "b": { + "register": "Vm.16B" + }, + "c": { + "register": "Va.16B" + } }, "Architectures": [ "A64" @@ -28296,8 +231533,12 @@ "a": { "register": "Vn.16B" }, - "b": {}, - "c": {} + "b": { + "register": "Vm.16B" + }, + "c": { + "register": "Va.16B" + } }, "Architectures": [ "A64" @@ -28323,8 +231564,12 @@ "a": { "register": "Vn.16B" }, - "b": {}, - "c": {} + "b": { + "register": "Vm.16B" + }, + "c": { + "register": "Va.16B" + } }, "Architectures": [ "A64" @@ -28350,8 +231595,12 @@ "a": { "register": "Vn.16B" }, - "b": {}, - "c": {} + "b": { + "register": "Vm.16B" + }, + "c": { + "register": "Va.16B" + } }, "Architectures": [ "A64" @@ -30097,7 +233346,7 @@ }, "Arguments_Preparation": { "a": { - "register": "Vd.4H " + "register": "Vd.4H" }, "b": { "register": "Vn.4H" @@ -30680,7 +233929,7 @@ }, "Arguments_Preparation": { "a": { - "register": "Vd.8H " + "register": "Vd.8H" }, "b": { "register": "Vn.8H" @@ -30846,8 +234095,12 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.2H" + }, + "b": { + "register": "Vd.2H" + }, "r": { "register": "Vd.2S" } @@ -30875,8 +234128,12 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.2H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 3 @@ -30908,8 +234165,12 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.2H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 3 @@ -30941,8 +234202,12 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.2H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 7 @@ -30974,8 +234239,12 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.2H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 7 @@ -31006,8 +234275,12 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.2H" + }, + "b": { + "register": "Vd.2H" + }, "r": { "register": "Vd.2S" } @@ -31034,8 +234307,12 @@ "value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.4H" + }, + "b": { + "register": "Vd.4H" + }, "r": { "register": "Vd.4S" } @@ -31063,8 +234340,12 @@ "value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.4H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 3 @@ -31096,8 +234377,12 @@ "value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.4H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 3 @@ -31129,8 +234414,12 @@ "value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.4H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 7 @@ -31162,8 +234451,12 @@ "value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.4H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 7 @@ -31194,8 +234487,12 @@ "value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.4H" + }, + "b": { + "register": "Vd.4H" + }, "r": { "register": "Vd.4S" } @@ -31222,8 +234519,12 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.2H" + }, + "b": { + "register": "Vd.2H" + }, "r": { "register": "Vd.2S" } @@ -31251,8 +234552,12 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.2H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 3 @@ -31284,8 +234589,12 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.2H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 3 @@ -31317,8 +234626,12 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.2H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 7 @@ -31350,8 +234663,12 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.2H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 7 @@ -31382,8 +234699,12 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.2H" + }, + "b": { + "register": "Vd.2H" + }, "r": { "register": "Vd.2S" } @@ -31410,8 +234731,12 @@ "value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.4H" + }, + "b": { + "register": "Vd.4H" + }, "r": { "register": "Vd.4S" } @@ -31439,8 +234764,12 @@ "value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.4H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 3 @@ -31472,8 +234801,12 @@ "value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.4H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 3 @@ -31505,8 +234838,12 @@ "value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.4H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 7 @@ -31538,8 +234875,12 @@ "value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.4H" + }, + "b": { + "register": "Vm.H" + }, "lane": { "minimum": 0, "maximum": 7 @@ -31570,8 +234911,12 @@ "value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, + "a": { + "register": "Vd.4H" + }, + "b": { + "register": "Vd.4H" + }, "r": { "register": "Vd.4S" } @@ -31911,7 +235256,7 @@ }, "Arguments_Preparation": { "a": { - "register": "Vd.4H " + "register": "Vd.4H" }, "b": { "register": "Vn.4H" @@ -32492,7 +235837,7 @@ }, "Arguments_Preparation": { "a": { - "register": "Vd.8H " + "register": "Vd.8H" }, "b": { "register": "Vn.8H" @@ -34868,230 +238213,6 @@ ] ] }, - { - "SIMD_ISA": "Neon", - "name": "vldap1_lane_u64", - "arguments": [ - "uint64_t const * ptr", - "uint64x1_t src", - "const int lane" - ], - "return_type": { - "value": "uint64x1_t" - }, - "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 - }, - "ptr": { - "register": "Xn" - }, - "src": { - "register": "Vt.1D" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LDAP1" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vldap1_lane_s64", - "arguments": [ - "int64_t const * ptr", - "int64x1_t src", - "const int lane" - ], - "return_type": { - "value": "int64x1_t" - }, - "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 - }, - "ptr": { - "register": "Xn" - }, - "src": { - "register": "Vt.1D" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LDAP1" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vldap1q_lane_u64", - "arguments": [ - "uint64_t const * ptr", - "uint64x2_t src", - "const int lane" - ], - "return_type": { - "value": "uint64x2_t" - }, - "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 - }, - "ptr": { - "register": "Xn" - }, - "src": { - "register": "Vt.2D" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LDAP1" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vldap1q_lane_s64", - "arguments": [ - "int64_t const * ptr", - "int64x2_t src", - "const int lane" - ], - "return_type": { - "value": "int64x2_t" - }, - "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 - }, - "ptr": { - "register": "Xn" - }, - "src": { - "register": "Vt.2D" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LDAP1" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vldap1_lane_p64", - "arguments": [ - "poly64_t const * ptr", - "poly64x1_t src", - "const int lane" - ], - "return_type": { - "value": "poly64x1_t" - }, - "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 - }, - "ptr": { - "register": "Xn" - }, - "src": { - "register": "Vt.1D" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LDAP1" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vldap1q_lane_p64", - "arguments": [ - "poly64_t const * ptr", - "poly64x2_t src", - "const int lane" - ], - "return_type": { - "value": "poly64x2_t" - }, - "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 - }, - "ptr": { - "register": "Xn" - }, - "src": { - "register": "Vt.2D" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LDAP1" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vldap1q_lane_f64", - "arguments": [ - "float64_t const * ptr", - "float64x2_t src", - "const int lane" - ], - "return_type": { - "value": "float64x2_t" - }, - "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 - }, - "ptr": { - "register": "Xn" - }, - "src": { - "register": "Vt.2D" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LDAP1" - ] - ] - }, { "SIMD_ISA": "Neon", "name": "vld1_dup_f16", @@ -39947,7 +243068,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4H" + }, + "src.val[1]": { "register": "Vt2.4H" } }, @@ -39981,7 +243105,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2S" + }, + "src.val[1]": { "register": "Vt2.2S" } }, @@ -40015,7 +243142,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.1D" + }, + "src.val[1]": { "register": "Vt2.1D" } }, @@ -40047,7 +243177,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4H" + }, + "src.val[1]": { "register": "Vt2.4H" } }, @@ -40081,7 +243214,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.1D" + }, + "src.val[1]": { "register": "Vt2.1D" } }, @@ -40113,7 +243249,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8B" + }, + "src.val[1]": { "register": "Vt2.8B" } }, @@ -40147,7 +243286,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4H" + }, + "src.val[1]": { "register": "Vt2.4H" } }, @@ -40181,7 +243323,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2S" + }, + "src.val[1]": { "register": "Vt2.2S" } }, @@ -40215,7 +243360,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.1D" + }, + "src.val[1]": { "register": "Vt2.1D" } }, @@ -40247,7 +243395,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8B" + }, + "src.val[1]": { "register": "Vt2.8B" } }, @@ -40281,7 +243432,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4H" + }, + "src.val[1]": { "register": "Vt2.4H" } }, @@ -40315,7 +243469,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2S" + }, + "src.val[1]": { "register": "Vt2.2S" } }, @@ -40349,7 +243506,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.1D" + }, + "src.val[1]": { "register": "Vt2.1D" } }, @@ -40381,7 +243541,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8B" + }, + "src.val[1]": { "register": "Vt2.8B" } }, @@ -41104,7 +244267,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8H" + }, + "src.val[1]": { "register": "Vt2.8H" } }, @@ -41138,7 +244304,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4S" + }, + "src.val[1]": { "register": "Vt2.4S" } }, @@ -41172,7 +244341,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2D" + }, + "src.val[1]": { "register": "Vt2.2D" } }, @@ -41204,7 +244376,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8H" + }, + "src.val[1]": { "register": "Vt2.8H" } }, @@ -41238,7 +244413,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2D" + }, + "src.val[1]": { "register": "Vt2.2D" } }, @@ -41270,7 +244448,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.16B" + }, + "src.val[1]": { "register": "Vt2.16B" } }, @@ -41302,7 +244483,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8H" + }, + "src.val[1]": { "register": "Vt2.8H" } }, @@ -41336,7 +244520,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4S" + }, + "src.val[1]": { "register": "Vt2.4S" } }, @@ -41370,7 +244557,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2D" + }, + "src.val[1]": { "register": "Vt2.2D" } }, @@ -41402,7 +244592,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.16B" + }, + "src.val[1]": { "register": "Vt2.16B" } }, @@ -41434,7 +244627,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8H" + }, + "src.val[1]": { "register": "Vt2.8H" } }, @@ -41468,7 +244664,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4S" + }, + "src.val[1]": { "register": "Vt2.4S" } }, @@ -41502,7 +244701,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2D" + }, + "src.val[1]": { "register": "Vt2.2D" } }, @@ -41534,7 +244736,10 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.16B" + }, + "src.val[1]": { "register": "Vt2.16B" } }, @@ -42255,7 +245460,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4H" + }, + "src.val[1]": { + "register": "Vt2.4H" + }, + "src.val[2]": { "register": "Vt3.4H" } }, @@ -42289,7 +245500,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2S" + }, + "src.val[1]": { + "register": "Vt2.2S" + }, + "src.val[2]": { "register": "Vt3.2S" } }, @@ -42323,7 +245540,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.1D" + }, + "src.val[1]": { + "register": "Vt2.1D" + }, + "src.val[2]": { "register": "Vt3.1D" } }, @@ -42355,7 +245578,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4H" + }, + "src.val[1]": { + "register": "Vt2.4H" + }, + "src.val[2]": { "register": "Vt3.4H" } }, @@ -42389,7 +245618,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.1D" + }, + "src.val[1]": { + "register": "Vt2.1D" + }, + "src.val[2]": { "register": "Vt3.1D" } }, @@ -42421,7 +245656,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8B" + }, + "src.val[1]": { + "register": "Vt2.8B" + }, + "src.val[2]": { "register": "Vt3.8B" } }, @@ -42455,7 +245696,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4H" + }, + "src.val[1]": { + "register": "Vt2.4H" + }, + "src.val[2]": { "register": "Vt3.4H" } }, @@ -42489,7 +245736,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2S" + }, + "src.val[1]": { + "register": "Vt2.2S" + }, + "src.val[2]": { "register": "Vt3.2S" } }, @@ -42523,7 +245776,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.1D" + }, + "src.val[1]": { + "register": "Vt2.1D" + }, + "src.val[2]": { "register": "Vt3.1D" } }, @@ -42555,7 +245814,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8B" + }, + "src.val[1]": { + "register": "Vt2.8B" + }, + "src.val[2]": { "register": "Vt3.8B" } }, @@ -42589,7 +245854,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4H" + }, + "src.val[1]": { + "register": "Vt2.4H" + }, + "src.val[2]": { "register": "Vt3.4H" } }, @@ -42623,7 +245894,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2S" + }, + "src.val[1]": { + "register": "Vt2.2S" + }, + "src.val[2]": { "register": "Vt3.2S" } }, @@ -42657,7 +245934,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.1D" + }, + "src.val[1]": { + "register": "Vt2.1D" + }, + "src.val[2]": { "register": "Vt3.1D" } }, @@ -42689,7 +245972,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8B" + }, + "src.val[1]": { + "register": "Vt2.8B" + }, + "src.val[2]": { "register": "Vt3.8B" } }, @@ -43412,7 +246701,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8H" + }, + "src.val[1]": { + "register": "Vt2.8H" + }, + "src.val[2]": { "register": "Vt3.8H" } }, @@ -43446,7 +246741,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4S" + }, + "src.val[1]": { + "register": "Vt2.4S" + }, + "src.val[2]": { "register": "Vt3.4S" } }, @@ -43480,7 +246781,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2D" + }, + "src.val[1]": { + "register": "Vt2.2D" + }, + "src.val[2]": { "register": "Vt3.2D" } }, @@ -43512,7 +246819,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8H" + }, + "src.val[1]": { + "register": "Vt2.8H" + }, + "src.val[2]": { "register": "Vt3.8H" } }, @@ -43546,7 +246859,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2D" + }, + "src.val[1]": { + "register": "Vt2.2D" + }, + "src.val[2]": { "register": "Vt3.2D" } }, @@ -43578,7 +246897,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.16B" + }, + "src.val[1]": { + "register": "Vt2.16B" + }, + "src.val[2]": { "register": "Vt3.16B" } }, @@ -43610,7 +246935,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8H" + }, + "src.val[1]": { + "register": "Vt2.8H" + }, + "src.val[2]": { "register": "Vt3.8H" } }, @@ -43644,7 +246975,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4S" + }, + "src.val[1]": { + "register": "Vt2.4S" + }, + "src.val[2]": { "register": "Vt3.4S" } }, @@ -43678,7 +247015,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2D" + }, + "src.val[1]": { + "register": "Vt2.2D" + }, + "src.val[2]": { "register": "Vt3.2D" } }, @@ -43710,7 +247053,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.16B" + }, + "src.val[1]": { + "register": "Vt2.16B" + }, + "src.val[2]": { "register": "Vt3.16B" } }, @@ -43742,7 +247091,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8H" + }, + "src.val[1]": { + "register": "Vt2.8H" + }, + "src.val[2]": { "register": "Vt3.8H" } }, @@ -43776,7 +247131,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4S" + }, + "src.val[1]": { + "register": "Vt2.4S" + }, + "src.val[2]": { "register": "Vt3.4S" } }, @@ -43810,7 +247171,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2D" + }, + "src.val[1]": { + "register": "Vt2.2D" + }, + "src.val[2]": { "register": "Vt3.2D" } }, @@ -43842,7 +247209,13 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.16B" + }, + "src.val[1]": { + "register": "Vt2.16B" + }, + "src.val[2]": { "register": "Vt3.16B" } }, @@ -44563,7 +247936,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4H" + }, + "src.val[1]": { + "register": "Vt2.4H" + }, + "src.val[2]": { + "register": "Vt3.4H" + }, + "src.val[3]": { "register": "Vt4.4H" } }, @@ -44597,7 +247979,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2S" + }, + "src.val[1]": { + "register": "Vt2.2S" + }, + "src.val[2]": { + "register": "Vt3.2S" + }, + "src.val[3]": { "register": "Vt4.2S" } }, @@ -44631,7 +248022,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.1D" + }, + "src.val[1]": { + "register": "Vt2.1D" + }, + "src.val[2]": { + "register": "Vt3.1D" + }, + "src.val[3]": { "register": "Vt4.1D" } }, @@ -44663,7 +248063,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4H" + }, + "src.val[1]": { + "register": "Vt2.4H" + }, + "src.val[2]": { + "register": "Vt3.4H" + }, + "src.val[3]": { "register": "Vt4.4H" } }, @@ -44697,7 +248106,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.1D" + }, + "src.val[1]": { + "register": "Vt2.1D" + }, + "src.val[2]": { + "register": "Vt3.1D" + }, + "src.val[3]": { "register": "Vt4.1D" } }, @@ -44729,7 +248147,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8B" + }, + "src.val[1]": { + "register": "Vt2.8B" + }, + "src.val[2]": { + "register": "Vt3.8B" + }, + "src.val[3]": { "register": "Vt4.8B" } }, @@ -44763,7 +248190,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4H" + }, + "src.val[1]": { + "register": "Vt2.4H" + }, + "src.val[2]": { + "register": "Vt3.4H" + }, + "src.val[3]": { "register": "Vt4.4H" } }, @@ -44797,7 +248233,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2S" + }, + "src.val[1]": { + "register": "Vt2.2S" + }, + "src.val[2]": { + "register": "Vt3.2S" + }, + "src.val[3]": { "register": "Vt4.2S" } }, @@ -44831,7 +248276,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.1D" + }, + "src.val[1]": { + "register": "Vt2.1D" + }, + "src.val[2]": { + "register": "Vt3.1D" + }, + "src.val[3]": { "register": "Vt4.1D" } }, @@ -44863,7 +248317,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8B" + }, + "src.val[1]": { + "register": "Vt2.8B" + }, + "src.val[2]": { + "register": "Vt3.8B" + }, + "src.val[3]": { "register": "Vt4.8B" } }, @@ -44897,7 +248360,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4H" + }, + "src.val[1]": { + "register": "Vt2.4H" + }, + "src.val[2]": { + "register": "Vt3.4H" + }, + "src.val[3]": { "register": "Vt4.4H" } }, @@ -44931,7 +248403,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2S" + }, + "src.val[1]": { + "register": "Vt2.2S" + }, + "src.val[2]": { + "register": "Vt3.2S" + }, + "src.val[3]": { "register": "Vt4.2S" } }, @@ -44965,7 +248446,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.1D" + }, + "src.val[1]": { + "register": "Vt2.1D" + }, + "src.val[2]": { + "register": "Vt3.1D" + }, + "src.val[3]": { "register": "Vt4.1D" } }, @@ -44997,7 +248487,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8B" + }, + "src.val[1]": { + "register": "Vt2.8B" + }, + "src.val[2]": { + "register": "Vt3.8B" + }, + "src.val[3]": { "register": "Vt4.8B" } }, @@ -45720,7 +249219,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8H" + }, + "src.val[1]": { + "register": "Vt2.8H" + }, + "src.val[2]": { + "register": "Vt3.8H" + }, + "src.val[3]": { "register": "Vt4.8H" } }, @@ -45754,7 +249262,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4S" + }, + "src.val[1]": { + "register": "Vt2.4S" + }, + "src.val[2]": { + "register": "Vt3.4S" + }, + "src.val[3]": { "register": "Vt4.4S" } }, @@ -45788,7 +249305,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2D" + }, + "src.val[1]": { + "register": "Vt2.2D" + }, + "src.val[2]": { + "register": "Vt3.2D" + }, + "src.val[3]": { "register": "Vt4.2D" } }, @@ -45820,7 +249346,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8H" + }, + "src.val[1]": { + "register": "Vt2.8H" + }, + "src.val[2]": { + "register": "Vt3.8H" + }, + "src.val[3]": { "register": "Vt4.8H" } }, @@ -45854,7 +249389,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2D" + }, + "src.val[1]": { + "register": "Vt2.2D" + }, + "src.val[2]": { + "register": "Vt3.2D" + }, + "src.val[3]": { "register": "Vt4.2D" } }, @@ -45886,7 +249430,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.16B" + }, + "src.val[1]": { + "register": "Vt2.16B" + }, + "src.val[2]": { + "register": "Vt3.16B" + }, + "src.val[3]": { "register": "Vt4.16B" } }, @@ -45918,7 +249471,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8H" + }, + "src.val[1]": { + "register": "Vt2.8H" + }, + "src.val[2]": { + "register": "Vt3.8H" + }, + "src.val[3]": { "register": "Vt4.8H" } }, @@ -45952,7 +249514,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4S" + }, + "src.val[1]": { + "register": "Vt2.4S" + }, + "src.val[2]": { + "register": "Vt3.4S" + }, + "src.val[3]": { "register": "Vt4.4S" } }, @@ -45986,7 +249557,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2D" + }, + "src.val[1]": { + "register": "Vt2.2D" + }, + "src.val[2]": { + "register": "Vt3.2D" + }, + "src.val[3]": { "register": "Vt4.2D" } }, @@ -46018,7 +249598,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.16B" + }, + "src.val[1]": { + "register": "Vt2.16B" + }, + "src.val[2]": { + "register": "Vt3.16B" + }, + "src.val[3]": { "register": "Vt4.16B" } }, @@ -46050,7 +249639,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.8H" + }, + "src.val[1]": { + "register": "Vt2.8H" + }, + "src.val[2]": { + "register": "Vt3.8H" + }, + "src.val[3]": { "register": "Vt4.8H" } }, @@ -46084,7 +249682,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.4S" + }, + "src.val[1]": { + "register": "Vt2.4S" + }, + "src.val[2]": { + "register": "Vt3.4S" + }, + "src.val[3]": { "register": "Vt4.4S" } }, @@ -46118,7 +249725,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.2D" + }, + "src.val[1]": { + "register": "Vt2.2D" + }, + "src.val[2]": { + "register": "Vt3.2D" + }, + "src.val[3]": { "register": "Vt4.2D" } }, @@ -46150,7 +249766,16 @@ "ptr": { "register": "Xn" }, - "src": { + "src.val[0]": { + "register": "Vt.16B" + }, + "src.val[1]": { + "register": "Vt2.16B" + }, + "src.val[2]": { + "register": "Vt3.16B" + }, + "src.val[3]": { "register": "Vt4.16B" } }, @@ -46432,6 +250057,262 @@ ] ] }, + { + "SIMD_ISA": "Neon", + "name": "vldap1_lane_f64", + "arguments": [ + "float64_t const * ptr", + "float64x1_t src", + "const int lane" + ], + "return_type": { + "value": "float64x1_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 0 + }, + "ptr": { + "register": "Xn" + }, + "src": { + "register": "Vt.1D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDAP1" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vldap1_lane_p64", + "arguments": [ + "poly64_t const * ptr", + "poly64x1_t src", + "const int lane" + ], + "return_type": { + "value": "poly64x1_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 0 + }, + "ptr": { + "register": "Xn" + }, + "src": { + "register": "Vt.1D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDAP1" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vldap1_lane_s64", + "arguments": [ + "int64_t const * ptr", + "int64x1_t src", + "const int lane" + ], + "return_type": { + "value": "int64x1_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 0 + }, + "ptr": { + "register": "Xn" + }, + "src": { + "register": "Vt.1D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDAP1" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vldap1_lane_u64", + "arguments": [ + "uint64_t const * ptr", + "uint64x1_t src", + "const int lane" + ], + "return_type": { + "value": "uint64x1_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 0 + }, + "ptr": { + "register": "Xn" + }, + "src": { + "register": "Vt.1D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDAP1" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vldap1q_lane_f64", + "arguments": [ + "float64_t const * ptr", + "float64x2_t src", + "const int lane" + ], + "return_type": { + "value": "float64x2_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 1 + }, + "ptr": { + "register": "Xn" + }, + "src": { + "register": "Vt.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDAP1" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vldap1q_lane_p64", + "arguments": [ + "poly64_t const * ptr", + "poly64x2_t src", + "const int lane" + ], + "return_type": { + "value": "poly64x2_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 1 + }, + "ptr": { + "register": "Xn" + }, + "src": { + "register": "Vt.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDAP1" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vldap1q_lane_s64", + "arguments": [ + "int64_t const * ptr", + "int64x2_t src", + "const int lane" + ], + "return_type": { + "value": "int64x2_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 1 + }, + "ptr": { + "register": "Xn" + }, + "src": { + "register": "Vt.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDAP1" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vldap1q_lane_u64", + "arguments": [ + "uint64_t const * ptr", + "uint64x2_t src", + "const int lane" + ], + "return_type": { + "value": "uint64x2_t" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 1 + }, + "ptr": { + "register": "Xn" + }, + "src": { + "register": "Vt.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LDAP1" + ] + ] + }, { "SIMD_ISA": "Neon", "name": "vldrq_p128", @@ -46456,6 +250337,1374 @@ ] ] }, + { + "SIMD_ISA": "Neon", + "name": "vluti2_lane_f16", + "arguments": [ + "float16x4_t vn", + "uint8x8_t vm", + "const int index" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "index": { + "minimum": 0, + "maximum": 3 + }, + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LUTI2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vluti2_lane_p16", + "arguments": [ + "poly16x4_t vn", + "uint8x8_t vm", + "const int index" + ], + "return_type": { + "value": "poly16x8_t" + }, + "Arguments_Preparation": { + "index": { + "minimum": 0, + "maximum": 3 + }, + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LUTI2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vluti2_lane_p8", + "arguments": [ + "poly8x8_t vn", + "uint8x8_t vm", + "const int index" + ], + "return_type": { + "value": "poly8x16_t" + }, + "Arguments_Preparation": { + "index": { + "minimum": 0, + "maximum": 1 + }, + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LUTI2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vluti2_lane_s16", + "arguments": [ + "int16x4_t vn", + "uint8x8_t vm", + "const int index" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "index": { + "minimum": 0, + "maximum": 3 + }, + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LUTI2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vluti2_lane_s8", + "arguments": [ + "int8x8_t vn", + "uint8x8_t vm", + "const int index" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "index": { + "minimum": 0, + "maximum": 1 + }, + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LUTI2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vluti2_lane_u16", + "arguments": [ + "uint16x4_t vn", + "uint8x8_t vm", + "const int index" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "index": { + "minimum": 0, + "maximum": 3 + }, + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LUTI2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vluti2_lane_u8", + "arguments": [ + "uint8x8_t vn", + "uint8x8_t vm", + "const int index" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "index": { + "minimum": 0, + "maximum": 1 + }, + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LUTI2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vluti2_laneq_f16", + "arguments": [ + "float16x4_t vn", + "uint8x16_t vm", + "const int index" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "index": { + "minimum": 0, + "maximum": 7 + }, + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LUTI2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vluti2_laneq_p16", + "arguments": [ + "poly16x4_t vn", + "uint8x16_t vm", + "const int index" + ], + "return_type": { + "value": "poly16x8_t" + }, + "Arguments_Preparation": { + "index": { + "minimum": 0, + "maximum": 7 + }, + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LUTI2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vluti2_laneq_p8", + "arguments": [ + "poly8x8_t vn", + "uint8x16_t vm", + "const int index" + ], + "return_type": { + "value": "poly8x16_t" + }, + "Arguments_Preparation": { + "index": { + "minimum": 0, + "maximum": 3 + }, + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LUTI2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vluti2_laneq_s16", + "arguments": [ + "int16x4_t vn", + "uint8x16_t vm", + "const int index" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "index": { + "minimum": 0, + "maximum": 7 + }, + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LUTI2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vluti2_laneq_s8", + "arguments": [ + "int8x8_t vn", + "uint8x16_t vm", + "const int index" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "index": { + "minimum": 0, + "maximum": 3 + }, + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LUTI2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vluti2_laneq_u16", + "arguments": [ + "uint16x4_t vn", + "uint8x16_t vm", + "const int index" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "index": { + "minimum": 0, + "maximum": 7 + }, + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LUTI2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vluti2_laneq_u8", + "arguments": [ + "uint8x8_t vn", + "uint8x16_t vm", + "const int index" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "index": { + "minimum": 0, + "maximum": 3 + }, + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LUTI2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vluti2q_lane_f16", + "arguments": [ + "float16x8_t vn", + "uint8x8_t vm", + "const int index" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "index": { + "minimum": 0, + "maximum": 3 + }, + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LUTI2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vluti2q_lane_p16", + "arguments": [ + "poly16x8_t vn", + "uint8x8_t vm", + "const int index" + ], + "return_type": { + "value": "poly16x8_t" + }, + "Arguments_Preparation": { + "index": { + "minimum": 0, + "maximum": 3 + }, + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LUTI2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vluti2q_lane_p8", + "arguments": [ + "poly8x16_t vn", + "uint8x8_t vm", + "const int index" + ], + "return_type": { + "value": "poly8x16_t" + }, + "Arguments_Preparation": { + "index": { + "minimum": 0, + "maximum": 1 + }, + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LUTI2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vluti2q_lane_s16", + "arguments": [ + "int16x8_t vn", + "uint8x8_t vm", + "const int index" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "index": { + "minimum": 0, + "maximum": 3 + }, + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LUTI2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vluti2q_lane_s8", + "arguments": [ + "int8x16_t vn", + "uint8x8_t vm", + "const int index" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "index": { + "minimum": 0, + "maximum": 1 + }, + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LUTI2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vluti2q_lane_u16", + "arguments": [ + "uint16x8_t vn", + "uint8x8_t vm", + "const int index" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "index": { + "minimum": 0, + "maximum": 3 + }, + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LUTI2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vluti2q_lane_u8", + "arguments": [ + "uint8x16_t vn", + "uint8x8_t vm", + "const int index" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "index": { + "minimum": 0, + "maximum": 1 + }, + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LUTI2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vluti2q_laneq_f16", + "arguments": [ + "float16x8_t vn", + "uint8x16_t vm", + "const int index" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "index": { + "minimum": 0, + "maximum": 7 + }, + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LUTI2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vluti2q_laneq_p16", + "arguments": [ + "poly16x8_t vn", + "uint8x16_t vm", + "const int index" + ], + "return_type": { + "value": "poly16x8_t" + }, + "Arguments_Preparation": { + "index": { + "minimum": 0, + "maximum": 7 + }, + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LUTI2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vluti2q_laneq_p8", + "arguments": [ + "poly8x16_t vn", + "uint8x16_t vm", + "const int index" + ], + "return_type": { + "value": "poly8x16_t" + }, + "Arguments_Preparation": { + "index": { + "minimum": 0, + "maximum": 3 + }, + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LUTI2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vluti2q_laneq_s16", + "arguments": [ + "int16x8_t vn", + "uint8x16_t vm", + "const int index" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "index": { + "minimum": 0, + "maximum": 7 + }, + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LUTI2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vluti2q_laneq_s8", + "arguments": [ + "int8x16_t vn", + "uint8x16_t vm", + "const int index" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "index": { + "minimum": 0, + "maximum": 3 + }, + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LUTI2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vluti2q_laneq_u16", + "arguments": [ + "uint16x8_t vn", + "uint8x16_t vm", + "const int index" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "index": { + "minimum": 0, + "maximum": 7 + }, + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LUTI2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vluti2q_laneq_u8", + "arguments": [ + "uint8x16_t vn", + "uint8x16_t vm", + "const int index" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "index": { + "minimum": 0, + "maximum": 3 + }, + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LUTI2" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vluti4q_lane_f16_x2", + "arguments": [ + "float16x8x2_t vn", + "uint8x8_t vm", + "const int index" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "index": { + "minimum": 0, + "maximum": 1 + }, + "vm": { + "register": "Vm" + }, + "vn.val[0]": { + "register": "Vn1.8H" + }, + "vn.val[1]": { + "register": "Vn2.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LUTI4" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vluti4q_lane_p16_x2", + "arguments": [ + "poly16x8x2_t vn", + "uint8x8_t vm", + "const int index" + ], + "return_type": { + "value": "poly16x8_t" + }, + "Arguments_Preparation": { + "index": { + "minimum": 0, + "maximum": 1 + }, + "vm": { + "register": "Vm" + }, + "vn.val[0]": { + "register": "Vn1.8H" + }, + "vn.val[1]": { + "register": "Vn2.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LUTI4" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vluti4q_lane_p8", + "arguments": [ + "poly8x16_t vn", + "uint8x8_t vm", + "const int index" + ], + "return_type": { + "value": "poly8x16_t" + }, + "Arguments_Preparation": { + "index": { + "minimum": 0, + "maximum": 0 + }, + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LUTI4" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vluti4q_lane_s16_x2", + "arguments": [ + "int16x8x2_t vn", + "uint8x8_t vm", + "const int index" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "index": { + "minimum": 0, + "maximum": 1 + }, + "vm": { + "register": "Vm" + }, + "vn.val[0]": { + "register": "Vn1.8H" + }, + "vn.val[1]": { + "register": "Vn2.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LUTI4" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vluti4q_lane_s8", + "arguments": [ + "int8x16_t vn", + "uint8x8_t vm", + "const int index" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "index": { + "minimum": 0, + "maximum": 0 + }, + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LUTI4" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vluti4q_lane_u16_x2", + "arguments": [ + "uint16x8x2_t vn", + "uint8x8_t vm", + "const int index" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "index": { + "minimum": 0, + "maximum": 1 + }, + "vm": { + "register": "Vm" + }, + "vn.val[0]": { + "register": "Vn1.8H" + }, + "vn.val[1]": { + "register": "Vn2.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LUTI4" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vluti4q_lane_u8", + "arguments": [ + "uint8x16_t vn", + "uint8x8_t vm", + "const int index" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "index": { + "minimum": 0, + "maximum": 0 + }, + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LUTI4" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vluti4q_laneq_f16_x2", + "arguments": [ + "float16x8x2_t vn", + "uint8x16_t vm", + "const int index" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "index": { + "minimum": 0, + "maximum": 3 + }, + "vm": { + "register": "Vm" + }, + "vn.val[0]": { + "register": "Vn1.8H" + }, + "vn.val[1]": { + "register": "Vn2.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LUTI4" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vluti4q_laneq_p16_x2", + "arguments": [ + "poly16x8x2_t vn", + "uint8x16_t vm", + "const int index" + ], + "return_type": { + "value": "poly16x8_t" + }, + "Arguments_Preparation": { + "index": { + "minimum": 0, + "maximum": 3 + }, + "vm": { + "register": "Vm" + }, + "vn.val[0]": { + "register": "Vn1.8H" + }, + "vn.val[1]": { + "register": "Vn2.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LUTI4" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vluti4q_laneq_p8", + "arguments": [ + "poly8x16_t vn", + "uint8x16_t vm", + "const int index" + ], + "return_type": { + "value": "poly8x16_t" + }, + "Arguments_Preparation": { + "index": { + "minimum": 0, + "maximum": 1 + }, + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LUTI4" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vluti4q_laneq_s16_x2", + "arguments": [ + "int16x8x2_t vn", + "uint8x16_t vm", + "const int index" + ], + "return_type": { + "value": "int16x8_t" + }, + "Arguments_Preparation": { + "index": { + "minimum": 0, + "maximum": 3 + }, + "vm": { + "register": "Vm" + }, + "vn.val[0]": { + "register": "Vn1.8H" + }, + "vn.val[1]": { + "register": "Vn2.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LUTI4" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vluti4q_laneq_s8", + "arguments": [ + "int8x16_t vn", + "uint8x16_t vm", + "const int index" + ], + "return_type": { + "value": "int8x16_t" + }, + "Arguments_Preparation": { + "index": { + "minimum": 0, + "maximum": 1 + }, + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LUTI4" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vluti4q_laneq_u16_x2", + "arguments": [ + "uint16x8x2_t vn", + "uint8x16_t vm", + "const int index" + ], + "return_type": { + "value": "uint16x8_t" + }, + "Arguments_Preparation": { + "index": { + "minimum": 0, + "maximum": 3 + }, + "vm": { + "register": "Vm" + }, + "vn.val[0]": { + "register": "Vn1.8H" + }, + "vn.val[1]": { + "register": "Vn2.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LUTI4" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vluti4q_laneq_u8", + "arguments": [ + "uint8x16_t vn", + "uint8x16_t vm", + "const int index" + ], + "return_type": { + "value": "uint8x16_t" + }, + "Arguments_Preparation": { + "index": { + "minimum": 0, + "maximum": 1 + }, + "vm": { + "register": "Vm" + }, + "vn": { + "register": "Vn.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "LUTI4" + ] + ] + }, { "SIMD_ISA": "Neon", "name": "vmax_f16", @@ -47388,7 +252637,7 @@ }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vm.2S" } }, "Architectures": [ @@ -47457,7 +252706,7 @@ }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vm.2S" } }, "Architectures": [ @@ -48631,7 +253880,7 @@ }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vm.2S" } }, "Architectures": [ @@ -48700,7 +253949,7 @@ }, "Arguments_Preparation": { "a": { - "register": "Vn.2S" + "register": "Vm.2S" } }, "Architectures": [ @@ -48971,7 +254220,7 @@ ], "instructions": [ [ - "RESULT[I]" + "result" ] ] }, @@ -49002,7 +254251,7 @@ ], "instructions": [ [ - "RESULT[I]" + "result" ] ] }, @@ -49019,13 +254268,10 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, "lane": { "minimum": 0, "maximum": 1 - }, - "v": {} + } }, "Architectures": [ "v7", @@ -49034,7 +254280,7 @@ ], "instructions": [ [ - "RESULT[I]" + "result" ] ] }, @@ -49203,20 +254449,17 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, "lane": { "minimum": 0, "maximum": 3 - }, - "v": {} + } }, "Architectures": [ "A64" ], "instructions": [ [ - "RESULT[I]" + "result" ] ] }, @@ -49393,7 +254636,7 @@ ], "instructions": [ [ - "RESULT[I]" + "result" ] ] }, @@ -50980,7 +256223,7 @@ ], "instructions": [ [ - "RESULT[I]" + "result" ] ] }, @@ -51011,7 +256254,7 @@ ], "instructions": [ [ - "RESULT[I]" + "result" ] ] }, @@ -51028,13 +256271,10 @@ "value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, "lane": { "minimum": 0, "maximum": 1 - }, - "v": {} + } }, "Architectures": [ "v7", @@ -51043,7 +256283,7 @@ ], "instructions": [ [ - "RESULT[I]" + "result" ] ] }, @@ -51212,20 +256452,17 @@ "value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, "lane": { "minimum": 0, "maximum": 3 - }, - "v": {} + } }, "Architectures": [ "A64" ], "instructions": [ [ - "RESULT[I]" + "result" ] ] }, @@ -51402,7 +256639,7 @@ ], "instructions": [ [ - "RESULT[I]" + "result" ] ] }, @@ -51765,7 +257002,7 @@ ], "instructions": [ [ - "RESULT[I]" + "result" ] ] }, @@ -51796,7 +257033,7 @@ ], "instructions": [ [ - "RESULT[I]" + "result" ] ] }, @@ -51813,13 +257050,10 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, "lane": { "minimum": 0, "maximum": 1 - }, - "v": {} + } }, "Architectures": [ "v7", @@ -51828,7 +257062,7 @@ ], "instructions": [ [ - "RESULT[I]" + "result" ] ] }, @@ -51997,20 +257231,17 @@ "value": "float32x2_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, "lane": { "minimum": 0, "maximum": 3 - }, - "v": {} + } }, "Architectures": [ "A64" ], "instructions": [ [ - "RESULT[I]" + "result" ] ] }, @@ -52187,7 +257418,7 @@ ], "instructions": [ [ - "RESULT[I]" + "result" ] ] }, @@ -53774,7 +259005,7 @@ ], "instructions": [ [ - "RESULT[I]" + "result" ] ] }, @@ -53805,7 +259036,7 @@ ], "instructions": [ [ - "RESULT[I]" + "result" ] ] }, @@ -53822,13 +259053,10 @@ "value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, "lane": { "minimum": 0, "maximum": 1 - }, - "v": {} + } }, "Architectures": [ "v7", @@ -53837,7 +259065,7 @@ ], "instructions": [ [ - "RESULT[I]" + "result" ] ] }, @@ -54006,20 +259234,17 @@ "value": "float32x4_t" }, "Arguments_Preparation": { - "a": {}, - "b": {}, "lane": { "minimum": 0, "maximum": 3 - }, - "v": {} + } }, "Architectures": [ "A64" ], "instructions": [ [ - "RESULT[I]" + "result" ] ] }, @@ -54196,7 +259421,7 @@ ], "instructions": [ [ - "RESULT[I]" + "result" ] ] }, @@ -75953,8 +281178,11 @@ "idx": { "register": "Vm.8B" }, - "t": { + "t.val[0]": { "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" } }, "Architectures": [ @@ -75980,8 +281208,11 @@ "idx": { "register": "Vm.8B" }, - "t": { + "t.val[0]": { "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" } }, "Architectures": [ @@ -76007,8 +281238,11 @@ "idx": { "register": "Vm.8B" }, - "t": { + "t.val[0]": { "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" } }, "Architectures": [ @@ -76034,8 +281268,11 @@ "idx": { "register": "Vm.16B" }, - "t": { + "t.val[0]": { "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" } }, "Architectures": [ @@ -76061,8 +281298,11 @@ "idx": { "register": "Vm.16B" }, - "t": { + "t.val[0]": { "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" } }, "Architectures": [ @@ -76088,8 +281328,11 @@ "idx": { "register": "Vm.16B" }, - "t": { + "t.val[0]": { "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" } }, "Architectures": [ @@ -76115,8 +281358,14 @@ "idx": { "register": "Vm.8B" }, - "t": { + "t.val[0]": { "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" } }, "Architectures": [ @@ -76142,8 +281391,14 @@ "idx": { "register": "Vm.8B" }, - "t": { + "t.val[0]": { "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" } }, "Architectures": [ @@ -76169,8 +281424,14 @@ "idx": { "register": "Vm.8B" }, - "t": { + "t.val[0]": { "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" } }, "Architectures": [ @@ -76196,8 +281457,14 @@ "idx": { "register": "Vm.16B" }, - "t": { + "t.val[0]": { "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" } }, "Architectures": [ @@ -76223,8 +281490,14 @@ "idx": { "register": "Vm.16B" }, - "t": { + "t.val[0]": { "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" } }, "Architectures": [ @@ -76250,8 +281523,14 @@ "idx": { "register": "Vm.16B" }, - "t": { + "t.val[0]": { "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" } }, "Architectures": [ @@ -76277,8 +281556,17 @@ "idx": { "register": "Vm.8B" }, - "t": { + "t.val[0]": { "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" + }, + "t.val[3]": { + "register": "Vn+3.16B" } }, "Architectures": [ @@ -76304,8 +281592,17 @@ "idx": { "register": "Vm.8B" }, - "t": { + "t.val[0]": { "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" + }, + "t.val[3]": { + "register": "Vn+3.16B" } }, "Architectures": [ @@ -76331,8 +281628,17 @@ "idx": { "register": "Vm.8B" }, - "t": { + "t.val[0]": { "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" + }, + "t.val[3]": { + "register": "Vn+3.16B" } }, "Architectures": [ @@ -76358,8 +281664,17 @@ "idx": { "register": "Vm.16B" }, - "t": { + "t.val[0]": { "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" + }, + "t.val[3]": { + "register": "Vn+3.16B" } }, "Architectures": [ @@ -76385,8 +281700,17 @@ "idx": { "register": "Vm.16B" }, - "t": { + "t.val[0]": { "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" + }, + "t.val[3]": { + "register": "Vn+3.16B" } }, "Architectures": [ @@ -76412,8 +281736,17 @@ "idx": { "register": "Vm.16B" }, - "t": { + "t.val[0]": { "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" + }, + "t.val[3]": { + "register": "Vn+3.16B" } }, "Architectures": [ @@ -76629,8 +281962,11 @@ "idx": { "register": "Vm.8B" }, - "t": { + "t.val[0]": { "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" } }, "Architectures": [ @@ -76660,8 +281996,11 @@ "idx": { "register": "Vm.8B" }, - "t": { + "t.val[0]": { "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" } }, "Architectures": [ @@ -76691,8 +282030,11 @@ "idx": { "register": "Vm.8B" }, - "t": { + "t.val[0]": { "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" } }, "Architectures": [ @@ -76722,8 +282064,11 @@ "idx": { "register": "Vm.16B" }, - "t": { + "t.val[0]": { "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" } }, "Architectures": [ @@ -76753,8 +282098,11 @@ "idx": { "register": "Vm.16B" }, - "t": { + "t.val[0]": { "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" } }, "Architectures": [ @@ -76784,8 +282132,11 @@ "idx": { "register": "Vm.16B" }, - "t": { + "t.val[0]": { "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" } }, "Architectures": [ @@ -76815,8 +282166,14 @@ "idx": { "register": "Vm.8B" }, - "t": { + "t.val[0]": { "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" } }, "Architectures": [ @@ -76846,8 +282203,14 @@ "idx": { "register": "Vm.8B" }, - "t": { + "t.val[0]": { "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" } }, "Architectures": [ @@ -76877,8 +282240,14 @@ "idx": { "register": "Vm.8B" }, - "t": { + "t.val[0]": { "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" } }, "Architectures": [ @@ -76908,8 +282277,14 @@ "idx": { "register": "Vm.16B" }, - "t": { + "t.val[0]": { "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" } }, "Architectures": [ @@ -76939,8 +282314,14 @@ "idx": { "register": "Vm.16B" }, - "t": { + "t.val[0]": { "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" } }, "Architectures": [ @@ -76970,8 +282351,14 @@ "idx": { "register": "Vm.16B" }, - "t": { + "t.val[0]": { "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" } }, "Architectures": [ @@ -77001,8 +282388,17 @@ "idx": { "register": "Vm.8B" }, - "t": { + "t.val[0]": { "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" + }, + "t.val[3]": { + "register": "Vn+3.16B" } }, "Architectures": [ @@ -77032,8 +282428,17 @@ "idx": { "register": "Vm.8B" }, - "t": { + "t.val[0]": { "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" + }, + "t.val[3]": { + "register": "Vn+3.16B" } }, "Architectures": [ @@ -77063,8 +282468,17 @@ "idx": { "register": "Vm.8B" }, - "t": { + "t.val[0]": { "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" + }, + "t.val[3]": { + "register": "Vn+3.16B" } }, "Architectures": [ @@ -77094,8 +282508,17 @@ "idx": { "register": "Vm.16B" }, - "t": { + "t.val[0]": { "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" + }, + "t.val[3]": { + "register": "Vn+3.16B" } }, "Architectures": [ @@ -77125,8 +282548,17 @@ "idx": { "register": "Vm.16B" }, - "t": { + "t.val[0]": { "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" + }, + "t.val[3]": { + "register": "Vn+3.16B" } }, "Architectures": [ @@ -77156,8 +282588,17 @@ "idx": { "register": "Vm.16B" }, - "t": { + "t.val[0]": { "register": "Vn.16B" + }, + "t.val[1]": { + "register": "Vn+1.16B" + }, + "t.val[2]": { + "register": "Vn+2.16B" + }, + "t.val[3]": { + "register": "Vn+3.16B" } }, "Architectures": [ @@ -77543,7 +282984,9 @@ "a": { "register": "Vn.2D" }, - "b": {} + "b": { + "register": "Vm.2D" + } }, "Architectures": [ "A64" @@ -93507,6 +298950,141 @@ ] ] }, + { + "SIMD_ISA": "Neon", + "name": "vscale_f16", + "arguments": [ + "float16x4_t vn", + "int16x4_t vm" + ], + "return_type": { + "value": "float16x4_t" + }, + "Arguments_Preparation": { + "vm": { + "register": "Vm.4H" + }, + "vn": { + "register": "Vn.4H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSCALE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vscale_f32", + "arguments": [ + "float32x2_t vn", + "int32x2_t vm" + ], + "return_type": { + "value": "float32x2_t" + }, + "Arguments_Preparation": { + "vm": { + "register": "Vm.2S" + }, + "vn": { + "register": "Vn.2S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSCALE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vscaleq_f16", + "arguments": [ + "float16x8_t vn", + "int16x8_t vm" + ], + "return_type": { + "value": "float16x8_t" + }, + "Arguments_Preparation": { + "vm": { + "register": "Vm.8H" + }, + "vn": { + "register": "Vn.8H" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSCALE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vscaleq_f32", + "arguments": [ + "float32x4_t vn", + "int32x4_t vm" + ], + "return_type": { + "value": "float32x4_t" + }, + "Arguments_Preparation": { + "vm": { + "register": "Vm.4S" + }, + "vn": { + "register": "Vn.4S" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSCALE" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vscaleq_f64", + "arguments": [ + "float64x2_t vn", + "int64x2_t vm" + ], + "return_type": { + "value": "float64x2_t" + }, + "Arguments_Preparation": { + "vm": { + "register": "Vm.2D" + }, + "vn": { + "register": "Vn.2D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FSCALE" + ] + ] + }, { "SIMD_ISA": "Neon", "name": "vset_lane_f16", @@ -94769,7 +300347,9 @@ "value": "uint64x2_t" }, "Arguments_Preparation": { - "hash_ab": {}, + "hash_ab": { + "register": "Vm.2D" + }, "hash_c_": { "register": "Qn" }, @@ -94804,7 +300384,9 @@ "hash_gf": { "register": "Qn" }, - "kwh_kwh2": {} + "kwh_kwh2": { + "register": "Vm.2D" + } }, "Architectures": [ "A64" @@ -94860,7 +300442,9 @@ "w14_15": { "register": "Vn.2D" }, - "w9_10": {} + "w9_10": { + "register": "Vm.2D" + } }, "Architectures": [ "A64" @@ -98006,8 +303590,12 @@ "a": { "register": "Vd.4S" }, - "b": {}, - "c": {} + "b": { + "register": "Vn.4S" + }, + "c": { + "register": "Vm.4S" + } }, "Architectures": [ "A64" @@ -98033,8 +303621,12 @@ "a": { "register": "Vd.4S" }, - "b": {}, - "c": {} + "b": { + "register": "Vn.4S" + }, + "c": { + "register": "Vm.4S" + } }, "Architectures": [ "A64" @@ -98060,8 +303652,12 @@ "a": { "register": "Vn.4S" }, - "b": {}, - "c": {} + "b": { + "register": "Vm.4S" + }, + "c": { + "register": "Va.4S" + } }, "Architectures": [ "A64" @@ -98088,8 +303684,12 @@ "a": { "register": "Vd.4S" }, - "b": {}, - "c": {}, + "b": { + "register": "Vn.4S" + }, + "c": { + "register": "Vm.4S" + }, "imm2": { "minimum": 0, "maximum": 3 @@ -98120,8 +303720,12 @@ "a": { "register": "Vd.4S" }, - "b": {}, - "c": {}, + "b": { + "register": "Vn.4S" + }, + "c": { + "register": "Vm.4S" + }, "imm2": { "minimum": 0, "maximum": 3 @@ -98152,8 +303756,12 @@ "a": { "register": "Vd.4S" }, - "b": {}, - "c": {}, + "b": { + "register": "Vn.4S" + }, + "c": { + "register": "Vm.4S" + }, "imm2": { "minimum": 0, "maximum": 3 @@ -98184,8 +303792,12 @@ "a": { "register": "Vd.4S" }, - "b": {}, - "c": {}, + "b": { + "register": "Vn.4S" + }, + "c": { + "register": "Vm.4S" + }, "imm2": { "minimum": 0, "maximum": 3 @@ -98214,7 +303826,9 @@ "a": { "register": "Vn.4S" }, - "b": {} + "b": { + "register": "Vm.4S" + } }, "Architectures": [ "A64" @@ -98239,7 +303853,9 @@ "a": { "register": "Vd.4S" }, - "b": {} + "b": { + "register": "Vn.4S" + } }, "Architectures": [ "A64" @@ -100197,7 +305813,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { "register": "Vt2.4H" } }, @@ -100226,7 +305845,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { "register": "Vt3.4H" } }, @@ -100255,7 +305880,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" + }, + "val.val[3]": { "register": "Vt4.4H" } }, @@ -100313,7 +305947,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { "register": "Vt2.2S" } }, @@ -100342,7 +305979,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" + }, + "val.val[2]": { "register": "Vt3.2S" } }, @@ -100371,7 +306014,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" + }, + "val.val[2]": { + "register": "Vt3.2S" + }, + "val.val[3]": { "register": "Vt4.2S" } }, @@ -100427,7 +306079,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { "register": "Vt2.1D" } }, @@ -100454,7 +306109,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { "register": "Vt3.1D" } }, @@ -100481,7 +306142,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" + }, + "val.val[3]": { "register": "Vt4.1D" } }, @@ -100967,6 +306637,42 @@ ] ] }, + { + "SIMD_ISA": "Neon", + "name": "vst1_mf8_x4", + "arguments": [ + "int8_t * ptr", + "int8x8x4_t val" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" + }, + "val.val[2]": { + "register": "Vt3.8B" + }, + "val.val[3]": { + "register": "Vt4.8B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1" + ] + ] + }, { "SIMD_ISA": "Neon", "name": "vst1_p16", @@ -101010,7 +306716,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { "register": "Vt2.4H" } }, @@ -101039,7 +306748,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { "register": "Vt3.4H" } }, @@ -101068,7 +306783,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" + }, + "val.val[3]": { "register": "Vt4.4H" } }, @@ -101125,7 +306849,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { "register": "Vt2.1D" } }, @@ -101153,7 +306880,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { "register": "Vt3.1D" } }, @@ -101181,7 +306914,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" + }, + "val.val[3]": { "register": "Vt4.1D" } }, @@ -101238,7 +306980,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { "register": "Vt2.8B" } }, @@ -101267,7 +307012,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" + }, + "val.val[2]": { "register": "Vt3.8B" } }, @@ -101296,7 +307047,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" + }, + "val.val[2]": { + "register": "Vt3.8B" + }, + "val.val[3]": { "register": "Vt4.8B" } }, @@ -101354,7 +307114,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { "register": "Vt2.4H" } }, @@ -101383,7 +307146,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { "register": "Vt3.4H" } }, @@ -101412,7 +307181,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" + }, + "val.val[3]": { "register": "Vt4.4H" } }, @@ -101470,7 +307248,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { "register": "Vt2.2S" } }, @@ -101499,7 +307280,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" + }, + "val.val[2]": { "register": "Vt3.2S" } }, @@ -101528,7 +307315,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" + }, + "val.val[2]": { + "register": "Vt3.2S" + }, + "val.val[3]": { "register": "Vt4.2S" } }, @@ -101586,7 +307382,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { "register": "Vt2.1D" } }, @@ -101615,7 +307414,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { "register": "Vt3.1D" } }, @@ -101644,7 +307449,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" + }, + "val.val[3]": { "register": "Vt4.1D" } }, @@ -101702,7 +307516,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { "register": "Vt2.8B" } }, @@ -101731,7 +307548,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" + }, + "val.val[2]": { "register": "Vt3.8B" } }, @@ -101760,7 +307583,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" + }, + "val.val[2]": { + "register": "Vt3.8B" + }, + "val.val[3]": { "register": "Vt4.8B" } }, @@ -101818,7 +307650,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { "register": "Vt2.4H" } }, @@ -101847,7 +307682,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { "register": "Vt3.4H" } }, @@ -101876,7 +307717,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" + }, + "val.val[3]": { "register": "Vt4.4H" } }, @@ -101934,7 +307784,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { "register": "Vt2.2S" } }, @@ -101963,7 +307816,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" + }, + "val.val[2]": { "register": "Vt3.2S" } }, @@ -101992,7 +307851,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" + }, + "val.val[2]": { + "register": "Vt3.2S" + }, + "val.val[3]": { "register": "Vt4.2S" } }, @@ -102050,7 +307918,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { "register": "Vt2.1D" } }, @@ -102079,7 +307950,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { "register": "Vt3.1D" } }, @@ -102108,7 +307985,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" + }, + "val.val[3]": { "register": "Vt4.1D" } }, @@ -102166,7 +308052,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { "register": "Vt2.8B" } }, @@ -102195,7 +308084,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" + }, + "val.val[2]": { "register": "Vt3.8B" } }, @@ -102224,7 +308119,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" + }, + "val.val[2]": { + "register": "Vt3.8B" + }, + "val.val[3]": { "register": "Vt4.8B" } }, @@ -102282,7 +308186,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { "register": "Vt2.8H" } }, @@ -102311,7 +308218,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { "register": "Vt3.8H" } }, @@ -102340,7 +308253,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" + }, + "val.val[3]": { "register": "Vt4.8H" } }, @@ -102398,7 +308320,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { "register": "Vt2.4S" } }, @@ -102427,7 +308352,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" + }, + "val.val[2]": { "register": "Vt3.4S" } }, @@ -102456,7 +308387,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" + }, + "val.val[2]": { + "register": "Vt3.4S" + }, + "val.val[3]": { "register": "Vt4.4S" } }, @@ -102512,7 +308452,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { "register": "Vt2.2D" } }, @@ -102539,7 +308482,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { "register": "Vt3.2D" } }, @@ -102566,7 +308515,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" + }, + "val.val[3]": { "register": "Vt4.2D" } }, @@ -103052,6 +309010,42 @@ ] ] }, + { + "SIMD_ISA": "Neon", + "name": "vst1q_mf8_x4", + "arguments": [ + "int8_t * ptr", + "int8x16x4_t val" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "ptr": { + "register": "Xn" + }, + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" + }, + "val.val[2]": { + "register": "Vt3.16B" + }, + "val.val[3]": { + "register": "Vt4.16B" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "ST1" + ] + ] + }, { "SIMD_ISA": "Neon", "name": "vst1q_p16", @@ -103095,7 +309089,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { "register": "Vt2.8H" } }, @@ -103124,7 +309121,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { "register": "Vt3.8H" } }, @@ -103153,7 +309156,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" + }, + "val.val[3]": { "register": "Vt4.8H" } }, @@ -103210,7 +309222,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { "register": "Vt2.2D" } }, @@ -103238,7 +309253,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { "register": "Vt3.2D" } }, @@ -103267,7 +309288,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" + }, + "val.val[3]": { "register": "Vt4.2D" } }, @@ -103324,7 +309354,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { "register": "Vt2.16B" } }, @@ -103353,7 +309386,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" + }, + "val.val[2]": { "register": "Vt3.16B" } }, @@ -103382,7 +309421,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" + }, + "val.val[2]": { + "register": "Vt3.16B" + }, + "val.val[3]": { "register": "Vt4.16B" } }, @@ -103440,7 +309488,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { "register": "Vt2.8H" } }, @@ -103469,7 +309520,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { "register": "Vt3.8H" } }, @@ -103498,7 +309555,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" + }, + "val.val[3]": { "register": "Vt4.8H" } }, @@ -103556,7 +309622,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { "register": "Vt2.4S" } }, @@ -103585,7 +309654,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" + }, + "val.val[2]": { "register": "Vt3.4S" } }, @@ -103614,7 +309689,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" + }, + "val.val[2]": { + "register": "Vt3.4S" + }, + "val.val[3]": { "register": "Vt4.4S" } }, @@ -103672,7 +309756,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { "register": "Vt2.2D" } }, @@ -103701,7 +309788,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { "register": "Vt3.2D" } }, @@ -103730,7 +309823,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" + }, + "val.val[3]": { "register": "Vt4.2D" } }, @@ -103788,7 +309890,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { "register": "Vt2.16B" } }, @@ -103817,7 +309922,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" + }, + "val.val[2]": { "register": "Vt3.16B" } }, @@ -103846,7 +309957,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" + }, + "val.val[2]": { + "register": "Vt3.16B" + }, + "val.val[3]": { "register": "Vt4.16B" } }, @@ -103904,7 +310024,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { "register": "Vt2.8H" } }, @@ -103933,7 +310056,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { "register": "Vt3.8H" } }, @@ -103962,7 +310091,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" + }, + "val.val[3]": { "register": "Vt4.8H" } }, @@ -104020,7 +310158,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { "register": "Vt2.4S" } }, @@ -104049,7 +310190,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" + }, + "val.val[2]": { "register": "Vt3.4S" } }, @@ -104078,7 +310225,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" + }, + "val.val[2]": { + "register": "Vt3.4S" + }, + "val.val[3]": { "register": "Vt4.4S" } }, @@ -104136,7 +310292,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { "register": "Vt2.2D" } }, @@ -104165,7 +310324,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { "register": "Vt3.2D" } }, @@ -104194,7 +310359,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" + }, + "val.val[3]": { "register": "Vt4.2D" } }, @@ -104252,7 +310426,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { "register": "Vt2.16B" } }, @@ -104281,7 +310458,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" + }, + "val.val[2]": { "register": "Vt3.16B" } }, @@ -104310,7 +310493,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" + }, + "val.val[2]": { + "register": "Vt3.16B" + }, + "val.val[3]": { "register": "Vt4.16B" } }, @@ -104339,7 +310531,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { "register": "Vt2.4H" } }, @@ -104368,7 +310563,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { "register": "Vt2.2S" } }, @@ -104397,7 +310595,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { "register": "Vt2.1D" } }, @@ -104429,7 +310630,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { "register": "Vt2.4H" } }, @@ -104463,7 +310667,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { "register": "Vt2.2S" } }, @@ -104497,7 +310704,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { "register": "Vt2.1D" } }, @@ -104529,7 +310739,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { "register": "Vt2.4H" } }, @@ -104563,7 +310776,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { "register": "Vt2.1D" } }, @@ -104595,7 +310811,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { "register": "Vt2.8B" } }, @@ -104629,7 +310848,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { "register": "Vt2.4H" } }, @@ -104663,7 +310885,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { "register": "Vt2.2S" } }, @@ -104697,7 +310922,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { "register": "Vt2.1D" } }, @@ -104729,7 +310957,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { "register": "Vt2.8B" } }, @@ -104763,7 +310994,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { "register": "Vt2.4H" } }, @@ -104797,7 +311031,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { "register": "Vt2.2S" } }, @@ -104831,7 +311068,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { "register": "Vt2.1D" } }, @@ -104863,7 +311103,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { "register": "Vt2.8B" } }, @@ -104892,7 +311135,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { "register": "Vt2.4H" } }, @@ -104921,7 +311167,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { "register": "Vt2.1D" } }, @@ -104949,7 +311198,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { "register": "Vt2.8B" } }, @@ -104978,7 +311230,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { "register": "Vt2.4H" } }, @@ -105007,7 +311262,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { "register": "Vt2.2S" } }, @@ -105036,7 +311294,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { "register": "Vt2.1D" } }, @@ -105065,7 +311326,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { "register": "Vt2.8B" } }, @@ -105094,7 +311358,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { "register": "Vt2.4H" } }, @@ -105123,7 +311390,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { "register": "Vt2.2S" } }, @@ -105152,7 +311422,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { "register": "Vt2.1D" } }, @@ -105181,7 +311454,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { "register": "Vt2.8B" } }, @@ -105210,7 +311486,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { "register": "Vt2.8H" } }, @@ -105239,7 +311518,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { "register": "Vt2.4S" } }, @@ -105268,7 +311550,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { "register": "Vt2.2D" } }, @@ -105300,7 +311585,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { "register": "Vt2.8H" } }, @@ -105334,7 +311622,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { "register": "Vt2.4S" } }, @@ -105363,12 +311654,15 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 2 + "maximum": 1 }, "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { "register": "Vt2.2D" } }, @@ -105400,7 +311694,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { "register": "Vt2.8H" } }, @@ -105434,7 +311731,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { "register": "Vt2.2D" } }, @@ -105466,7 +311766,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { "register": "Vt2.16B" } }, @@ -105498,7 +311801,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { "register": "Vt2.8H" } }, @@ -105532,7 +311838,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { "register": "Vt2.4S" } }, @@ -105566,7 +311875,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { "register": "Vt2.2D" } }, @@ -105598,7 +311910,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { "register": "Vt2.16B" } }, @@ -105630,7 +311945,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { "register": "Vt2.8H" } }, @@ -105664,7 +311982,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { "register": "Vt2.4S" } }, @@ -105698,7 +312019,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { "register": "Vt2.2D" } }, @@ -105730,7 +312054,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { "register": "Vt2.16B" } }, @@ -105757,7 +312084,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { "register": "Vt2.8H" } }, @@ -105786,7 +312116,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { "register": "Vt2.2D" } }, @@ -105813,7 +312146,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { "register": "Vt2.16B" } }, @@ -105842,7 +312178,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { "register": "Vt2.8H" } }, @@ -105871,7 +312210,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { "register": "Vt2.4S" } }, @@ -105900,7 +312242,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { "register": "Vt2.2D" } }, @@ -105927,7 +312272,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { "register": "Vt2.16B" } }, @@ -105956,7 +312304,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { "register": "Vt2.8H" } }, @@ -105985,7 +312336,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { "register": "Vt2.4S" } }, @@ -106014,7 +312368,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { "register": "Vt2.2D" } }, @@ -106041,7 +312398,10 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { "register": "Vt2.16B" } }, @@ -106070,7 +312430,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { "register": "Vt3.4H" } }, @@ -106099,7 +312465,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" + }, + "val.val[2]": { "register": "Vt3.2S" } }, @@ -106128,7 +312500,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { "register": "Vt3.1D" } }, @@ -106160,7 +312538,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { "register": "Vt3.4H" } }, @@ -106194,7 +312578,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" + }, + "val.val[2]": { "register": "Vt3.2S" } }, @@ -106228,7 +312618,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { "register": "Vt3.1D" } }, @@ -106260,7 +312656,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { "register": "Vt3.4H" } }, @@ -106294,7 +312696,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { "register": "Vt3.1D" } }, @@ -106326,7 +312734,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" + }, + "val.val[2]": { "register": "Vt3.8B" } }, @@ -106360,7 +312774,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { "register": "Vt3.4H" } }, @@ -106394,7 +312814,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" + }, + "val.val[2]": { "register": "Vt3.2S" } }, @@ -106428,7 +312854,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { "register": "Vt3.1D" } }, @@ -106460,7 +312892,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" + }, + "val.val[2]": { "register": "Vt3.8B" } }, @@ -106494,7 +312932,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { "register": "Vt3.4H" } }, @@ -106528,7 +312972,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" + }, + "val.val[2]": { "register": "Vt3.2S" } }, @@ -106562,7 +313012,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { "register": "Vt3.1D" } }, @@ -106594,7 +313050,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" + }, + "val.val[2]": { "register": "Vt3.8B" } }, @@ -106623,7 +313085,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { "register": "Vt3.4H" } }, @@ -106652,7 +313120,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { "register": "Vt3.1D" } }, @@ -106680,7 +313154,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" + }, + "val.val[2]": { "register": "Vt3.8B" } }, @@ -106709,7 +313189,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { "register": "Vt3.4H" } }, @@ -106738,7 +313224,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" + }, + "val.val[2]": { "register": "Vt3.2S" } }, @@ -106767,7 +313259,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { "register": "Vt3.1D" } }, @@ -106796,7 +313294,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" + }, + "val.val[2]": { "register": "Vt3.8B" } }, @@ -106825,7 +313329,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { "register": "Vt3.4H" } }, @@ -106854,7 +313364,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" + }, + "val.val[2]": { "register": "Vt3.2S" } }, @@ -106883,7 +313399,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { "register": "Vt3.1D" } }, @@ -106912,7 +313434,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" + }, + "val.val[2]": { "register": "Vt3.8B" } }, @@ -106941,7 +313469,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { "register": "Vt3.8H" } }, @@ -106970,7 +313504,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" + }, + "val.val[2]": { "register": "Vt3.4S" } }, @@ -106999,7 +313539,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { "register": "Vt3.2D" } }, @@ -107031,7 +313577,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { "register": "Vt3.8H" } }, @@ -107065,7 +313617,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" + }, + "val.val[2]": { "register": "Vt3.4S" } }, @@ -107099,7 +313657,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { "register": "Vt3.2D" } }, @@ -107131,7 +313695,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { "register": "Vt3.8H" } }, @@ -107165,7 +313735,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { "register": "Vt3.2D" } }, @@ -107197,7 +313773,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" + }, + "val.val[2]": { "register": "Vt3.16B" } }, @@ -107231,7 +313813,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { "register": "Vt3.8H" } }, @@ -107265,7 +313853,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" + }, + "val.val[2]": { "register": "Vt3.4S" } }, @@ -107299,7 +313893,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { "register": "Vt3.2D" } }, @@ -107331,7 +313931,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" + }, + "val.val[2]": { "register": "Vt3.16B" } }, @@ -107365,7 +313971,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { "register": "Vt3.8H" } }, @@ -107399,7 +314011,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" + }, + "val.val[2]": { "register": "Vt3.4S" } }, @@ -107433,7 +314051,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { "register": "Vt3.2D" } }, @@ -107465,7 +314089,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" + }, + "val.val[2]": { "register": "Vt3.16B" } }, @@ -107494,7 +314124,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { "register": "Vt3.8H" } }, @@ -107523,7 +314159,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { "register": "Vt3.2D" } }, @@ -107550,7 +314192,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" + }, + "val.val[2]": { "register": "Vt3.16B" } }, @@ -107579,7 +314227,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { "register": "Vt3.8H" } }, @@ -107608,7 +314262,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" + }, + "val.val[2]": { "register": "Vt3.4S" } }, @@ -107637,7 +314297,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { "register": "Vt3.2D" } }, @@ -107664,7 +314330,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" + }, + "val.val[2]": { "register": "Vt3.16B" } }, @@ -107693,7 +314365,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { "register": "Vt3.8H" } }, @@ -107722,7 +314400,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" + }, + "val.val[2]": { "register": "Vt3.4S" } }, @@ -107751,7 +314435,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { "register": "Vt3.2D" } }, @@ -107778,7 +314468,13 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" + }, + "val.val[2]": { "register": "Vt3.16B" } }, @@ -107807,7 +314503,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" + }, + "val.val[3]": { "register": "Vt4.4H" } }, @@ -107836,7 +314541,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" + }, + "val.val[2]": { + "register": "Vt3.2S" + }, + "val.val[3]": { "register": "Vt4.2S" } }, @@ -107865,7 +314579,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" + }, + "val.val[3]": { "register": "Vt4.1D" } }, @@ -107897,7 +314620,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" + }, + "val.val[3]": { "register": "Vt4.4H" } }, @@ -107931,7 +314663,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" + }, + "val.val[2]": { + "register": "Vt3.2S" + }, + "val.val[3]": { "register": "Vt4.2S" } }, @@ -107965,7 +314706,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" + }, + "val.val[3]": { "register": "Vt4.1D" } }, @@ -107997,7 +314747,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" + }, + "val.val[3]": { "register": "Vt4.4H" } }, @@ -108031,7 +314790,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" + }, + "val.val[3]": { "register": "Vt4.1D" } }, @@ -108063,7 +314831,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" + }, + "val.val[2]": { + "register": "Vt3.8B" + }, + "val.val[3]": { "register": "Vt4.8B" } }, @@ -108097,7 +314874,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" + }, + "val.val[3]": { "register": "Vt4.4H" } }, @@ -108131,7 +314917,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" + }, + "val.val[2]": { + "register": "Vt3.2S" + }, + "val.val[3]": { "register": "Vt4.2S" } }, @@ -108165,7 +314960,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" + }, + "val.val[3]": { "register": "Vt4.1D" } }, @@ -108197,7 +315001,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" + }, + "val.val[2]": { + "register": "Vt3.8B" + }, + "val.val[3]": { "register": "Vt4.8B" } }, @@ -108231,7 +315044,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" + }, + "val.val[3]": { "register": "Vt4.4H" } }, @@ -108265,7 +315087,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" + }, + "val.val[2]": { + "register": "Vt3.2S" + }, + "val.val[3]": { "register": "Vt4.2S" } }, @@ -108299,7 +315130,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" + }, + "val.val[3]": { "register": "Vt4.1D" } }, @@ -108331,7 +315171,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" + }, + "val.val[2]": { + "register": "Vt3.8B" + }, + "val.val[3]": { "register": "Vt4.8B" } }, @@ -108360,7 +315209,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" + }, + "val.val[3]": { "register": "Vt4.4H" } }, @@ -108389,7 +315247,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" + }, + "val.val[3]": { "register": "Vt4.1D" } }, @@ -108417,7 +315284,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" + }, + "val.val[2]": { + "register": "Vt3.8B" + }, + "val.val[3]": { "register": "Vt4.8B" } }, @@ -108446,7 +315322,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" + }, + "val.val[3]": { "register": "Vt4.4H" } }, @@ -108475,7 +315360,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" + }, + "val.val[2]": { + "register": "Vt3.2S" + }, + "val.val[3]": { "register": "Vt4.2S" } }, @@ -108504,7 +315398,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" + }, + "val.val[3]": { "register": "Vt4.1D" } }, @@ -108533,7 +315436,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" + }, + "val.val[2]": { + "register": "Vt3.8B" + }, + "val.val[3]": { "register": "Vt4.8B" } }, @@ -108562,7 +315474,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4H" + }, + "val.val[1]": { + "register": "Vt2.4H" + }, + "val.val[2]": { + "register": "Vt3.4H" + }, + "val.val[3]": { "register": "Vt4.4H" } }, @@ -108591,7 +315512,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2S" + }, + "val.val[1]": { + "register": "Vt2.2S" + }, + "val.val[2]": { + "register": "Vt3.2S" + }, + "val.val[3]": { "register": "Vt4.2S" } }, @@ -108620,7 +315550,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.1D" + }, + "val.val[1]": { + "register": "Vt2.1D" + }, + "val.val[2]": { + "register": "Vt3.1D" + }, + "val.val[3]": { "register": "Vt4.1D" } }, @@ -108649,7 +315588,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8B" + }, + "val.val[1]": { + "register": "Vt2.8B" + }, + "val.val[2]": { + "register": "Vt3.8B" + }, + "val.val[3]": { "register": "Vt4.8B" } }, @@ -108678,7 +315626,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" + }, + "val.val[3]": { "register": "Vt4.8H" } }, @@ -108707,7 +315664,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" + }, + "val.val[2]": { + "register": "Vt3.4S" + }, + "val.val[3]": { "register": "Vt4.4S" } }, @@ -108736,7 +315702,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" + }, + "val.val[3]": { "register": "Vt4.2D" } }, @@ -108768,7 +315743,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" + }, + "val.val[3]": { "register": "Vt4.8H" } }, @@ -108802,7 +315786,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" + }, + "val.val[2]": { + "register": "Vt3.4S" + }, + "val.val[3]": { "register": "Vt4.4S" } }, @@ -108836,7 +315829,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" + }, + "val.val[3]": { "register": "Vt4.2D" } }, @@ -108868,7 +315870,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" + }, + "val.val[3]": { "register": "Vt4.8H" } }, @@ -108902,7 +315913,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" + }, + "val.val[3]": { "register": "Vt4.2D" } }, @@ -108934,7 +315954,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" + }, + "val.val[2]": { + "register": "Vt3.16B" + }, + "val.val[3]": { "register": "Vt4.16B" } }, @@ -108966,7 +315995,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" + }, + "val.val[3]": { "register": "Vt4.8H" } }, @@ -109000,7 +316038,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" + }, + "val.val[2]": { + "register": "Vt3.4S" + }, + "val.val[3]": { "register": "Vt4.4S" } }, @@ -109034,7 +316081,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" + }, + "val.val[3]": { "register": "Vt4.2D" } }, @@ -109066,7 +316122,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" + }, + "val.val[2]": { + "register": "Vt3.16B" + }, + "val.val[3]": { "register": "Vt4.16B" } }, @@ -109098,7 +316163,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" + }, + "val.val[3]": { "register": "Vt4.8H" } }, @@ -109132,7 +316206,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" + }, + "val.val[2]": { + "register": "Vt3.4S" + }, + "val.val[3]": { "register": "Vt4.4S" } }, @@ -109166,7 +316249,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" + }, + "val.val[3]": { "register": "Vt4.2D" } }, @@ -109198,7 +316290,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" + }, + "val.val[2]": { + "register": "Vt3.16B" + }, + "val.val[3]": { "register": "Vt4.16B" } }, @@ -109225,7 +316326,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" + }, + "val.val[3]": { "register": "Vt4.8H" } }, @@ -109254,7 +316364,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" + }, + "val.val[3]": { "register": "Vt4.2D" } }, @@ -109281,7 +316400,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" + }, + "val.val[2]": { + "register": "Vt3.16B" + }, + "val.val[3]": { "register": "Vt4.16B" } }, @@ -109310,7 +316438,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" + }, + "val.val[3]": { "register": "Vt4.8H" } }, @@ -109339,7 +316476,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" + }, + "val.val[2]": { + "register": "Vt3.4S" + }, + "val.val[3]": { "register": "Vt4.4S" } }, @@ -109368,7 +316514,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" + }, + "val.val[3]": { "register": "Vt4.2D" } }, @@ -109395,7 +316550,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" + }, + "val.val[2]": { + "register": "Vt3.16B" + }, + "val.val[3]": { "register": "Vt4.16B" } }, @@ -109424,7 +316588,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.8H" + }, + "val.val[1]": { + "register": "Vt2.8H" + }, + "val.val[2]": { + "register": "Vt3.8H" + }, + "val.val[3]": { "register": "Vt4.8H" } }, @@ -109453,7 +316626,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.4S" + }, + "val.val[1]": { + "register": "Vt2.4S" + }, + "val.val[2]": { + "register": "Vt3.4S" + }, + "val.val[3]": { "register": "Vt4.4S" } }, @@ -109482,7 +316664,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.2D" + }, + "val.val[1]": { + "register": "Vt2.2D" + }, + "val.val[2]": { + "register": "Vt3.2D" + }, + "val.val[3]": { "register": "Vt4.2D" } }, @@ -109509,7 +316700,16 @@ "ptr": { "register": "Xn" }, - "val": { + "val.val[0]": { + "register": "Vt.16B" + }, + "val.val[1]": { + "register": "Vt2.16B" + }, + "val.val[2]": { + "register": "Vt3.16B" + }, + "val.val[3]": { "register": "Vt4.16B" } }, @@ -109526,38 +316726,106 @@ }, { "SIMD_ISA": "Neon", - "name": "vstrq_p128", + "name": "vstl1_lane_f64", "arguments": [ - "poly128_t * ptr", - "poly128_t val" + "float64_t * ptr", + "float64x1_t val", + "const int lane" ], "return_type": { "value": "void" }, "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 0 + }, "ptr": { "register": "Xn" }, "val": { - "register": "Qt" + "register": "Vt.1D" } }, "Architectures": [ - "A32", "A64" ], "instructions": [ [ - "STR" + "STL1" ] ] }, { "SIMD_ISA": "Neon", - "name": "vstl1_lane_f64", + "name": "vstl1_lane_p64", "arguments": [ - "float64_t * ptr", - "float64x1_t val", + "poly64_t * ptr", + "poly64x1_t val", + "const int lane" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 0 + }, + "ptr": { + "register": "Xn" + }, + "val": { + "register": "Vt.1D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STL1" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vstl1_lane_s64", + "arguments": [ + "int64_t * ptr", + "int64x1_t val", + "const int lane" + ], + "return_type": { + "value": "void" + }, + "Arguments_Preparation": { + "lane": { + "minimum": 0, + "maximum": 0 + }, + "ptr": { + "register": "Xn" + }, + "val": { + "register": "Vt.1D" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "STL1" + ] + ] + }, + { + "SIMD_ISA": "Neon", + "name": "vstl1_lane_u64", + "arguments": [ + "uint64_t * ptr", + "uint64x1_t val", "const int lane" ], "return_type": { @@ -109616,38 +316884,6 @@ ] ] }, - { - "SIMD_ISA": "Neon", - "name": "vstl1_lane_p64", - "arguments": [ - "poly64_t * ptr", - "poly64x1_t val", - "const int lane" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 - }, - "ptr": { - "register": "Xn" - }, - "val": { - "register": "Vt.1D" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "STL1" - ] - ] - }, { "SIMD_ISA": "Neon", "name": "vstl1q_lane_p64", @@ -109682,10 +316918,10 @@ }, { "SIMD_ISA": "Neon", - "name": "vstl1_lane_u64", + "name": "vstl1q_lane_s64", "arguments": [ - "uint64_t * ptr", - "uint64x1_t val", + "int64_t * ptr", + "int64x2_t val", "const int lane" ], "return_type": { @@ -109694,13 +316930,13 @@ "Arguments_Preparation": { "lane": { "minimum": 0, - "maximum": 0 + "maximum": 1 }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt.1D" + "register": "Vt.2D" } }, "Architectures": [ @@ -109746,65 +316982,29 @@ }, { "SIMD_ISA": "Neon", - "name": "vstl1_lane_s64", + "name": "vstrq_p128", "arguments": [ - "int64_t * ptr", - "int64x1_t val", - "const int lane" + "poly128_t * ptr", + "poly128_t val" ], "return_type": { "value": "void" }, "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 0 - }, "ptr": { "register": "Xn" }, "val": { - "register": "Vt.1D" + "register": "Qt" } }, "Architectures": [ + "A32", "A64" ], "instructions": [ [ - "STL1" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vstl1q_lane_s64", - "arguments": [ - "int64_t * ptr", - "int64x2_t val", - "const int lane" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "lane": { - "minimum": 0, - "maximum": 1 - }, - "ptr": { - "register": "Xn" - }, - "val": { - "register": "Vt.2D" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "STL1" + "STR" ] ] }, @@ -111655,7 +318855,6 @@ } }, "Architectures": [ - "A32", "A64" ], "instructions": [ @@ -111711,10 +318910,12 @@ "value": "poly8x8_t" }, "Arguments_Preparation": { - "a": { + "Zeros(64):a": { "register": "Vn.16B" }, - "idx": {} + "idx": { + "register": "Vm.8B" + } }, "Architectures": [ "v7", @@ -111738,10 +318939,12 @@ "value": "int8x8_t" }, "Arguments_Preparation": { - "a": { + "Zeros(64):a": { "register": "Vn.16B" }, - "idx": {} + "idx": { + "register": "Vm.8B" + } }, "Architectures": [ "v7", @@ -111765,10 +318968,12 @@ "value": "uint8x8_t" }, "Arguments_Preparation": { - "a": { + "Zeros(64):a": { "register": "Vn.16B" }, - "idx": {} + "idx": { + "register": "Vm.8B" + } }, "Architectures": [ "v7", @@ -111792,10 +318997,12 @@ "value": "poly8x8_t" }, "Arguments_Preparation": { - "a": { + "a.val[1]:a.val[0]": { "register": "Vn.16B" }, - "idx": {} + "idx": { + "register": "Vm.8B" + } }, "Architectures": [ "v7", @@ -111819,10 +319026,12 @@ "value": "int8x8_t" }, "Arguments_Preparation": { - "a": { + "a.val[1]:a.val[0]": { "register": "Vn.16B" }, - "idx": {} + "idx": { + "register": "Vm.8B" + } }, "Architectures": [ "v7", @@ -111846,10 +319055,12 @@ "value": "uint8x8_t" }, "Arguments_Preparation": { - "a": { + "a.val[1]:a.val[0]": { "register": "Vn.16B" }, - "idx": {} + "idx": { + "register": "Vm.8B" + } }, "Architectures": [ "v7", @@ -111873,10 +319084,15 @@ "value": "poly8x8_t" }, "Arguments_Preparation": { - "a": { + "Zeros(64):a.val[2]": { + "register": "Vn+1.16B" + }, + "a.val[1]:a.val[0]": { "register": "Vn.16B" }, - "idx": {} + "idx": { + "register": "Vm.8B" + } }, "Architectures": [ "v7", @@ -111900,10 +319116,15 @@ "value": "int8x8_t" }, "Arguments_Preparation": { - "a": { + "Zeros(64):a.val[2]": { + "register": "Vn+1.16B" + }, + "a.val[1]:a.val[0]": { "register": "Vn.16B" }, - "idx": {} + "idx": { + "register": "Vm.8B" + } }, "Architectures": [ "v7", @@ -111927,10 +319148,15 @@ "value": "uint8x8_t" }, "Arguments_Preparation": { - "a": { + "Zeros(64):a.val[2]": { + "register": "Vn+1.16B" + }, + "a.val[1]:a.val[0]": { "register": "Vn.16B" }, - "idx": {} + "idx": { + "register": "Vm.8B" + } }, "Architectures": [ "v7", @@ -111954,10 +319180,15 @@ "value": "poly8x8_t" }, "Arguments_Preparation": { - "a": { + "a.val[1]:a.val[0]": { "register": "Vn.16B" }, - "idx": {} + "a.val[3]:a.val[2]": { + "register": "Vn+1.16B" + }, + "idx": { + "register": "Vm.8B" + } }, "Architectures": [ "v7", @@ -111981,10 +319212,15 @@ "value": "int8x8_t" }, "Arguments_Preparation": { - "a": { + "a.val[1]:a.val[0]": { "register": "Vn.16B" }, - "idx": {} + "a.val[3]:a.val[2]": { + "register": "Vn+1.16B" + }, + "idx": { + "register": "Vm.8B" + } }, "Architectures": [ "v7", @@ -112008,10 +319244,15 @@ "value": "uint8x8_t" }, "Arguments_Preparation": { - "a": { + "a.val[1]:a.val[0]": { "register": "Vn.16B" }, - "idx": {} + "a.val[3]:a.val[2]": { + "register": "Vn+1.16B" + }, + "idx": { + "register": "Vm.8B" + } }, "Architectures": [ "v7", @@ -112036,11 +319277,15 @@ "value": "poly8x8_t" }, "Arguments_Preparation": { - "a": {}, - "b": { + "Zeros(64):b": { "register": "Vn.16B" }, - "idx": {} + "a": { + "register": "Vd.8B" + }, + "idx": { + "register": "Vm.8B" + } }, "Architectures": [ "v7", @@ -112068,11 +319313,15 @@ "value": "int8x8_t" }, "Arguments_Preparation": { - "a": {}, - "b": { + "Zeros(64):b": { "register": "Vn.16B" }, - "idx": {} + "a": { + "register": "Vd.8B" + }, + "idx": { + "register": "Vm.8B" + } }, "Architectures": [ "v7", @@ -112100,11 +319349,15 @@ "value": "uint8x8_t" }, "Arguments_Preparation": { - "a": {}, - "b": { + "Zeros(64):b": { "register": "Vn.16B" }, - "idx": {} + "a": { + "register": "Vd.8B" + }, + "idx": { + "register": "Vm.8B" + } }, "Architectures": [ "v7", @@ -112132,11 +319385,15 @@ "value": "poly8x8_t" }, "Arguments_Preparation": { - "a": {}, - "b": { + "a": { + "register": "Vd.8B" + }, + "b.val[1]:b.val[0]": { "register": "Vn.16B" }, - "idx": {} + "idx": { + "register": "Vm.8B" + } }, "Architectures": [ "v7", @@ -112161,11 +319418,15 @@ "value": "int8x8_t" }, "Arguments_Preparation": { - "a": {}, - "b": { + "a": { + "register": "Vd.8B" + }, + "b.val[1]:b.val[0]": { "register": "Vn.16B" }, - "idx": {} + "idx": { + "register": "Vm.8B" + } }, "Architectures": [ "v7", @@ -112190,11 +319451,15 @@ "value": "uint8x8_t" }, "Arguments_Preparation": { - "a": {}, - "b": { + "a": { + "register": "Vd.8B" + }, + "b.val[1]:b.val[0]": { "register": "Vn.16B" }, - "idx": {} + "idx": { + "register": "Vm.8B" + } }, "Architectures": [ "v7", @@ -112219,11 +319484,18 @@ "value": "poly8x8_t" }, "Arguments_Preparation": { - "a": {}, - "b": { + "Zeros(64):b.val[2]": { + "register": "Vn+1.16B" + }, + "a": { + "register": "Vd.8B" + }, + "b.val[1]:b.val[0]": { "register": "Vn.16B" }, - "idx": {} + "idx": { + "register": "Vm.8B" + } }, "Architectures": [ "v7", @@ -112251,11 +319523,18 @@ "value": "int8x8_t" }, "Arguments_Preparation": { - "a": {}, - "b": { + "Zeros(64):b.val[2]": { + "register": "Vn+1.16B" + }, + "a": { + "register": "Vd.8B" + }, + "b.val[1]:b.val[0]": { "register": "Vn.16B" }, - "idx": {} + "idx": { + "register": "Vm.8B" + } }, "Architectures": [ "v7", @@ -112283,11 +319562,18 @@ "value": "uint8x8_t" }, "Arguments_Preparation": { - "a": {}, - "b": { + "Zeros(64):b.val[2]": { + "register": "Vn+1.16B" + }, + "a": { + "register": "Vd.8B" + }, + "b.val[1]:b.val[0]": { "register": "Vn.16B" }, - "idx": {} + "idx": { + "register": "Vm.8B" + } }, "Architectures": [ "v7", @@ -112315,11 +319601,18 @@ "value": "poly8x8_t" }, "Arguments_Preparation": { - "a": {}, - "b": { + "a": { + "register": "Vd.8B" + }, + "b.val[1]:b.val[0]": { "register": "Vn.16B" }, - "idx": {} + "b.val[3]:b.val[2]": { + "register": "Vn+1.16B" + }, + "c": { + "register": "Vm.8B" + } }, "Architectures": [ "v7", @@ -112344,11 +319637,18 @@ "value": "int8x8_t" }, "Arguments_Preparation": { - "a": {}, - "b": { + "a": { + "register": "Vd.8B" + }, + "b.val[1]:b.val[0]": { "register": "Vn.16B" }, - "idx": {} + "b.val[3]:b.val[2]": { + "register": "Vn+1.16B" + }, + "c": { + "register": "Vm.8B" + } }, "Architectures": [ "v7", @@ -112373,11 +319673,18 @@ "value": "uint8x8_t" }, "Arguments_Preparation": { - "a": {}, - "b": { + "a": { + "register": "Vd.8B" + }, + "b.val[1]:b.val[0]": { "register": "Vn.16B" }, - "idx": {} + "b.val[3]:b.val[2]": { + "register": "Vn+1.16B" + }, + "c": { + "register": "Vm.8B" + } }, "Architectures": [ "v7", @@ -115435,6 +322742,7 @@ } }, "Architectures": [ + "A32", "A64" ], "instructions": [ @@ -117386,7 +324694,9 @@ "a": { "register": "Vn.2D" }, - "b": {}, + "b": { + "register": "Vm.2D" + }, "imm6": { "minimum": 0, "maximum": 63 @@ -119297,1746 +326607,6 @@ ] ] }, - { - "SIMD_ISA": "Neon", - "name": "vamin_f16", - "arguments": [ - "float16x4_t a", - "float16x4_t b" - ], - "return_type": { - "value": "float16x4_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.4H" - }, - "b": { - "register": "Vm.4H" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "FAMIN" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vaminq_f16", - "arguments": [ - "float16x8_t a", - "float16x8_t b" - ], - "return_type": { - "value": "float16x8_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm.8H" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "FAMIN" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vamin_f32", - "arguments": [ - "float32x2_t a", - "float32x2_t b" - ], - "return_type": { - "value": "float32x2_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.2S" - }, - "b": { - "register": "Vm.2S" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "FAMIN" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vaminq_f32", - "arguments": [ - "float32x4_t a", - "float32x4_t b" - ], - "return_type": { - "value": "float32x4_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.4S" - }, - "b": { - "register": "Vm.4S" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "FAMIN" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vaminq_f64", - "arguments": [ - "float64x2_t a", - "float64x2_t b" - ], - "return_type": { - "value": "float64x2_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.2D" - }, - "b": { - "register": "Vm.2D" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "FAMIN" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vamax_f16", - "arguments": [ - "float16x4_t a", - "float16x4_t b" - ], - "return_type": { - "value": "float16x4_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.4H" - }, - "b": { - "register": "Vm.4H" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "FAMAX" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vamaxq_f16", - "arguments": [ - "float16x8_t a", - "float16x8_t b" - ], - "return_type": { - "value": "float16x8_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm.8H" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "FAMAX" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vamax_f32", - "arguments": [ - "float32x2_t a", - "float32x2_t b" - ], - "return_type": { - "value": "float32x2_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.2S" - }, - "b": { - "register": "Vm.2S" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "FAMAX" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vamaxq_f32", - "arguments": [ - "float32x4_t a", - "float32x4_t b" - ], - "return_type": { - "value": "float32x4_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.4S" - }, - "b": { - "register": "Vm.4S" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "FAMAX" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vamaxq_f64", - "arguments": [ - "float64x2_t a", - "float64x2_t b" - ], - "return_type": { - "value": "float64x2_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.2D" - }, - "b": { - "register": "Vm.2D" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "FAMAX" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vluti2_lane_f16", - "arguments": [ - "float16x4_t a", - "uint8x8_t b", - "const int index" - ], - "return_type": { - "value": "float16x8_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 3 - }, - "r": { - "register": "Vd.8H" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LUTI2" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vluti2_lane_s16", - "arguments": [ - "int16x4_t a", - "uint8x8_t b", - "const int index" - ], - "return_type": { - "value": "int16x8_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 3 - }, - "r": { - "register": "Vd.8H" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LUTI2" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vluti2_lane_u16", - "arguments": [ - "uint16x4_t a", - "uint8x8_t b", - "const int index" - ], - "return_type": { - "value": "uint16x8_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm" - }, - "lane": { - "minimum": 0, - "maximum": 3 - }, - "r": { - "register": "Vd.8H" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LUTI2" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vluti2_lane_p16", - "arguments": [ - "poly16x4_t a", - "uint8x8_t b", - "const int index" - ], - "return_type": { - "value": "poly16x8_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 3 - }, - "r": { - "register": "Vd.8H" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LUTI2" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vluti2_laneq_f16", - "arguments": [ - "float16x4_t a", - "uint8x16_t b", - "const int index" - ], - "return_type": { - "value": "float16x8_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 7 - }, - "r": { - "register": "Vd.8H" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LUTI2" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vluti2_laneq_s16", - "arguments": [ - "int16x4_t a", - "uint8x16_t b", - "const int index" - ], - "return_type": { - "value": "int16x8_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 7 - }, - "r": { - "register": "Vd.8H" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LUTI2" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vluti2_laneq_u16", - "arguments": [ - "uint16x4_t a", - "uint8x16_t b", - "const int index" - ], - "return_type": { - "value": "uint16x8_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 7 - }, - "r": { - "register": "Vd.8H" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LUTI2" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vluti2_laneq_p16", - "arguments": [ - "poly16x4_t a", - "uint8x16_t b", - "const int index" - ], - "return_type": { - "value": "poly16x8_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 7 - }, - "r": { - "register": "Vd.8H" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LUTI2" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vluti2q_lane_f16", - "arguments": [ - "float16x8_t a", - "uint8x8_t b", - "const int index" - ], - "return_type": { - "value": "float16x8_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 3 - }, - "r": { - "register": "Vd.8H" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LUTI2" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vluti2q_lane_s16", - "arguments": [ - "int16x8_t a", - "uint8x8_t b", - "const int index" - ], - "return_type": { - "value": "int16x8_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 3 - }, - "r": { - "register": "Vd.8H" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LUTI2" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vluti2q_lane_u16", - "arguments": [ - "uint16x8_t a", - "uint8x8_t b", - "const int index" - ], - "return_type": { - "value": "uint16x8_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 3 - }, - "r": { - "register": "Vd.8H" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LUTI2" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vluti2q_lane_p16", - "arguments": [ - "poly16x8_t a", - "uint8x8_t b", - "const int index" - ], - "return_type": { - "value": "poly16x8_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 3 - }, - "r": { - "register": "Vd.8H" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LUTI2" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vluti2q_laneq_f16", - "arguments": [ - "float16x8_t a", - "uint8x16_t b", - "const int index" - ], - "return_type": { - "value": "float16x8_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 7 - }, - "r": { - "register": "Vd.8H" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LUTI2" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vluti2q_laneq_s16", - "arguments": [ - "int16x8_t a", - "uint8x16_t b", - "const int index" - ], - "return_type": { - "value": "int16x8_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 7 - }, - "r": { - "register": "Vd.8H" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LUTI2" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vluti2q_laneq_u16", - "arguments": [ - "uint16x8_t a", - "uint8x16_t b", - "const int index" - ], - "return_type": { - "value": "uint16x8_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 7 - }, - "r": { - "register": "Vd.8H" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LUTI2" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vluti2_lane_u8", - "arguments": [ - "uint8x8_t a", - "uint8x8_t b", - "const int lane" - ], - "return_type": { - "value": "uint8x16_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.16B" - }, - "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 1 - }, - "r": { - "register": "Vd.16B" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LUTI2" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vluti2q_lane_u8", - "arguments": [ - "uint8x16_t a", - "uint8x8_t b", - "const int lane" - ], - "return_type": { - "value": "uint8x16_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.16B" - }, - "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 1 - }, - "r": { - "register": "Vd.16B" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LUTI2" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vluti2_lane_s8", - "arguments": [ - "int8x8_t a", - "uint8x8_t b", - "const int lane" - ], - "return_type": { - "value": "int8x16_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.16B" - }, - "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 1 - }, - "r": { - "register": "Vd.16B" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LUTI2" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vluti2q_lane_s8", - "arguments": [ - "int8x16_t a", - "uint8x8_t b", - "const int lane" - ], - "return_type": { - "value": "int8x16_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.16B" - }, - "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 1 - }, - "r": { - "register": "Vd.16B" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LUTI2" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vluti2_lane_p8", - "arguments": [ - "poly8x8_t a", - "uint8x8_t b", - "const int lane" - ], - "return_type": { - "value": "poly8x16_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.16B" - }, - "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 1 - }, - "r": { - "register": "Vd.16B" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LUTI2" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vluti2q_lane_p8", - "arguments": [ - "poly8x16_t a", - "uint8x8_t b", - "const int lane" - ], - "return_type": { - "value": "poly8x16_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.16B" - }, - "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 1 - }, - "r": { - "register": "Vd.16B" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LUTI2" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vluti2_laneq_u8", - "arguments": [ - "uint8x8_t a", - "uint8x16_t b", - "const int index" - ], - "return_type": { - "value": "uint8x16_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.16B" - }, - "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 3 - }, - "r": { - "register": "Vd.16B" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LUTI2" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vluti2q_laneq_u8", - "arguments": [ - "uint8x16_t a", - "uint8x16_t b", - "const int index" - ], - "return_type": { - "value": "uint8x16_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.16B" - }, - "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 3 - }, - "r": { - "register": "Vd.16B" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LUTI2" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vluti2_laneq_s8", - "arguments": [ - "int8x8_t a", - "uint8x16_t b", - "const int index" - ], - "return_type": { - "value": "int8x16_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.16B" - }, - "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 3 - }, - "r": { - "register": "Vd.16B" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LUTI2" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vluti2q_laneq_s8", - "arguments": [ - "int8x16_t a", - "uint8x16_t b", - "const int index" - ], - "return_type": { - "value": "int8x16_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.16B" - }, - "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 3 - }, - "r": { - "register": "Vd.16B" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LUTI2" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vluti2_laneq_p8", - "arguments": [ - "poly8x8_t a", - "uint8x16_t b", - "const int index" - ], - "return_type": { - "value": "poly8x16_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.16B" - }, - "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 3 - }, - "r": { - "register": "Vd.16B" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LUTI2" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vluti2q_laneq_p8", - "arguments": [ - "poly8x16_t a", - "uint8x16_t b", - "const int index" - ], - "return_type": { - "value": "poly8x16_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.16B" - }, - "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 3 - }, - "r": { - "register": "Vd.16B" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LUTI2" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vluti2q_laneq_p16", - "arguments": [ - "poly16x8_t a", - "uint8x16_t b", - "const int index" - ], - "return_type": { - "value": "poly16x8_t" - }, - "Arguments_Preparation": { - "a": { - "register": "Vn.8H" - }, - "b": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 7 - }, - "r": { - "register": "Vd.8H" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LUTI2" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vluti4q_lane_u8", - "arguments": [ - "uint8x16_t vn", - "uint8x8_t vm", - "const int index" - ], - "return_type": { - "value": "uint8x16_t" - }, - "Arguments_Preparation": { - "vn": { - "register": "Vn.16B" - }, - "vm": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 0 - }, - "r": { - "register": "Vd.16B" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LUTI4" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vluti4q_laneq_u8", - "arguments": [ - "uint8x16_t vn", - "uint8x16_t vm", - "const int index" - ], - "return_type": { - "value": "uint8x16_t" - }, - "Arguments_Preparation": { - "vn": { - "register": "Vn.16B" - }, - "vm": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 1 - }, - "r": { - "register": "Vd.16B" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LUTI4" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vluti4q_lane_s8", - "arguments": [ - "int8x16_t vn", - "uint8x8_t vm", - "const int index" - ], - "return_type": { - "value": "int8x16_t" - }, - "Arguments_Preparation": { - "vn": { - "register": "Vn.16B" - }, - "vm": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 0 - }, - "r": { - "register": "Vd.16B" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LUTI4" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vluti4q_laneq_s8", - "arguments": [ - "int8x16_t vn", - "uint8x16_t vm", - "const int index" - ], - "return_type": { - "value": "int8x16_t" - }, - "Arguments_Preparation": { - "vn": { - "register": "Vn.16B" - }, - "vm": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 1 - }, - "r": { - "register": "Vd.16B" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LUTI4" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vluti4q_lane_p8", - "arguments": [ - "poly8x16_t vn", - "uint8x8_t vm", - "const int index" - ], - "return_type": { - "value": "poly8x16_t" - }, - "Arguments_Preparation": { - "vn": { - "register": "Vn.16B" - }, - "vm": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 0 - }, - "r": { - "register": "Vd.16B" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LUTI4" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vluti4q_laneq_p8", - "arguments": [ - "poly8x16_t vn", - "uint8x16_t vm", - "const int index" - ], - "return_type": { - "value": "poly8x16_t" - }, - "Arguments_Preparation": { - "vn": { - "register": "Vn.16B" - }, - "vm": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 1 - }, - "r": { - "register": "Vd.16B" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LUTI4" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vluti4q_lane_u16_x2", - "arguments": [ - "uint16x8x2_t vn", - "uint8x8_t vm", - "const int index" - ], - "return_type": { - "value": "uint16x8_t" - }, - "Arguments_Preparation": { - "vn": { - "register": "Vn1.8H" - }, - "vm": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 1 - }, - "r": { - "register": "Vd.8H" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LUTI4" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vluti4q_laneq_u16_x2", - "arguments": [ - "uint16x8x2_t vn", - "uint8x16_t vm", - "const int index" - ], - "return_type": { - "value": "uint16x8_t" - }, - "Arguments_Preparation": { - "vn": { - "register": "Vn1.8H" - }, - "vm": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 3 - }, - "r": { - "register": "Vd.8H" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LUTI4" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vluti4q_lane_s16_x2", - "arguments": [ - "int16x8x2_t vn", - "uint8x8_t vm", - "const int index" - ], - "return_type": { - "value": "int16x8_t" - }, - "Arguments_Preparation": { - "vn": { - "register": "Vn1.8H" - }, - "vm": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 1 - }, - "r": { - "register": "Vd.8H" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LUTI4" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vluti4q_laneq_s16_x2", - "arguments": [ - "int16x8x2_t vn", - "uint8x16_t vm", - "const int index" - ], - "return_type": { - "value": "int16x8_t" - }, - "Arguments_Preparation": { - "vn": { - "register": "Vn1.8H" - }, - "vm": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 3 - }, - "r": { - "register": "Vd.8H" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LUTI4" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vluti4q_lane_f16_x2", - "arguments": [ - "float16x8x2_t vn", - "uint8x8_t vm", - "const int index" - ], - "return_type": { - "value": "float16x8_t" - }, - "Arguments_Preparation": { - "vn": { - "register": "Vn1.8H" - }, - "vm": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 1 - }, - "r": { - "register": "Vd.8H" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LUTI4" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vluti4q_laneq_f16_x2", - "arguments": [ - "float16x8x2_t vn", - "uint8x16_t vm", - "const int index" - ], - "return_type": { - "value": "float16x8_t" - }, - "Arguments_Preparation": { - "vn": { - "register": "Vn1.8H" - }, - "vm": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 3 - }, - "r": { - "register": "Vd.8H" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LUTI4" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vluti4q_lane_p16_x2", - "arguments": [ - "poly16x8x2_t vn", - "uint8x8_t vm", - "const int index" - ], - "return_type": { - "value": "poly16x8_t" - }, - "Arguments_Preparation": { - "vn": { - "register": "Vn1.8H" - }, - "vm": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 1 - }, - "r": { - "register": "Vd.8H" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LUTI4" - ] - ] - }, - { - "SIMD_ISA": "Neon", - "name": "vluti4q_laneq_p16_x2", - "arguments": [ - "poly16x8x2_t vn", - "uint8x16_t vm", - "const int index" - ], - "return_type": { - "value": "poly16x8_t" - }, - "Arguments_Preparation": { - "vn": { - "register": "Vn1.8H" - }, - "vm": { - "register": "Vm" - }, - "index": { - "minimum": 0, - "maximum": 3 - }, - "r": { - "register": "Vd.8H" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LUTI4" - ] - ] - }, { "SIMD_ISA": "Neon", "name": "__jcvt", From a370aa3251019ef319cf76bef1b7cc333cbd3ec3 Mon Sep 17 00:00:00 2001 From: David Wood Date: Fri, 16 Jan 2026 12:46:33 +0000 Subject: [PATCH 13/20] stdarch-verify: support sve Co-authored-by: Adam Gemmell Co-authored-by: Jamie Cunliffe Co-authored-by: Jacob Bramley Co-authored-by: Luca Vizzarro --- .../stdarch/crates/stdarch-verify/src/lib.rs | 67 +++++- .../crates/stdarch-verify/tests/arm.rs | 195 +++++++++++++++--- 2 files changed, 225 insertions(+), 37 deletions(-) diff --git a/library/stdarch/crates/stdarch-verify/src/lib.rs b/library/stdarch/crates/stdarch-verify/src/lib.rs index c81f5f45bcce..f7304ab32685 100644 --- a/library/stdarch/crates/stdarch-verify/src/lib.rs +++ b/library/stdarch/crates/stdarch-verify/src/lib.rs @@ -120,6 +120,13 @@ fn functions(input: TokenStream, dirs: &[&str]) -> TokenStream { ); } + // Newer intrinsics don't have `rustc_legacy_const_generics` - assume they belong at + // the end of the argument list + if required_const.is_empty() && legacy_const_generics.is_empty() { + legacy_const_generics = + (arguments.len()..(arguments.len() + const_arguments.len())).collect(); + } + // The list of required consts, used to verify the arguments, comes from either the // `rustc_args_required_const` or the `rustc_legacy_const_generics` attribute. let required_const = if required_const.is_empty() { @@ -136,14 +143,14 @@ fn functions(input: TokenStream, dirs: &[&str]) -> TokenStream { arguments.insert(idx, ty); } - // strip leading underscore from fn name when building a test - // _mm_foo -> mm_foo such that the test name is test_mm_foo. - let test_name_string = format!("{name}"); - let mut test_name_id = test_name_string.as_str(); - while test_name_id.starts_with('_') { - test_name_id = &test_name_id[1..]; - } - let has_test = tests.contains(&format!("test_{test_name_id}")); + // Strip leading underscore from fn name when building a test + // `_mm_foo` -> `mm_foo` such that the test name is `test_mm_foo`. + let test_name = name.to_string(); + let test_name = test_name.trim_start_matches('_'); + let has_test = tests.contains(&format!("test_{test_name}")) + // SVE load/store tests start with `test` or `_with_` + || tests.iter().any(|t| t.starts_with(&format!("test_{test_name}")) + || t.ends_with(&format!("_with_{test_name}"))); let doc = find_doc(&f.attrs); @@ -347,6 +354,50 @@ fn to_type(t: &syn::Type) -> proc_macro2::TokenStream { "v4f32" => quote! { &v4f32 }, "v2f64" => quote! { &v2f64 }, + "svbool_t" => quote! { &SVBOOL }, + "svint8_t" => quote! { &SVI8 }, + "svint8x2_t" => quote! { &SVI8X2 }, + "svint8x3_t" => quote! { &SVI8X3 }, + "svint8x4_t" => quote! { &SVI8X4 }, + "svint16_t" => quote! { &SVI16 }, + "svint16x2_t" => quote! { &SVI16X2 }, + "svint16x3_t" => quote! { &SVI16X3 }, + "svint16x4_t" => quote! { &SVI16X4 }, + "svint32_t" => quote! { &SVI32 }, + "svint32x2_t" => quote! { &SVI32X2 }, + "svint32x3_t" => quote! { &SVI32X3 }, + "svint32x4_t" => quote! { &SVI32X4 }, + "svint64_t" => quote! { &SVI64 }, + "svint64x2_t" => quote! { &SVI64X2 }, + "svint64x3_t" => quote! { &SVI64X3 }, + "svint64x4_t" => quote! { &SVI64X4 }, + "svuint8_t" => quote! { &SVU8 }, + "svuint8x2_t" => quote! { &SVU8X2 }, + "svuint8x3_t" => quote! { &SVU8X3 }, + "svuint8x4_t" => quote! { &SVU8X4 }, + "svuint16_t" => quote! { &SVU16 }, + "svuint16x2_t" => quote! { &SVU16X2 }, + "svuint16x3_t" => quote! { &SVU16X3 }, + "svuint16x4_t" => quote! { &SVU16X4 }, + "svuint32_t" => quote! { &SVU32 }, + "svuint32x2_t" => quote! { &SVU32X2 }, + "svuint32x3_t" => quote! { &SVU32X3 }, + "svuint32x4_t" => quote! { &SVU32X4 }, + "svuint64_t" => quote! { &SVU64 }, + "svuint64x2_t" => quote! { &SVU64X2 }, + "svuint64x3_t" => quote! { &SVU64X3 }, + "svuint64x4_t" => quote! { &SVU64X4 }, + "svfloat32_t" => quote! { &SVF32 }, + "svfloat32x2_t" => quote! { &SVF32X2 }, + "svfloat32x3_t" => quote! { &SVF32X3 }, + "svfloat32x4_t" => quote! { &SVF32X4 }, + "svfloat64_t" => quote! { &SVF64 }, + "svfloat64x2_t" => quote! { &SVF64X2 }, + "svfloat64x3_t" => quote! { &SVF64X3 }, + "svfloat64x4_t" => quote! { &SVF64X4 }, + "svprfop" => quote! { &SVPRFOP }, + "svpattern" => quote! { &SVPATTERN }, + // Generic types "T" => quote! { &GENERICT }, "U" => quote! { &GENERICU }, diff --git a/library/stdarch/crates/stdarch-verify/tests/arm.rs b/library/stdarch/crates/stdarch-verify/tests/arm.rs index c5744de3f644..a37af2222a5d 100644 --- a/library/stdarch/crates/stdarch-verify/tests/arm.rs +++ b/library/stdarch/crates/stdarch-verify/tests/arm.rs @@ -16,6 +16,7 @@ struct Function { doc: &'static str, } +static BOOL: Type = Type::PrimBool; static F16: Type = Type::PrimFloat(16); static F32: Type = Type::PrimFloat(32); static F64: Type = Type::PrimFloat(64); @@ -28,6 +29,7 @@ struct Function { static U64: Type = Type::PrimUnsigned(64); static U8: Type = Type::PrimUnsigned(8); static NEVER: Type = Type::Never; +static VOID: Type = Type::Void; static GENERICT: Type = Type::GenericParam("T"); static GENERICU: Type = Type::GenericParam("U"); @@ -151,19 +153,78 @@ struct Function { static U8X8X3: Type = Type::U(8, 8, 3); static U8X8X4: Type = Type::U(8, 8, 4); +static SVBOOL: Type = Type::Pred(1); +static SVBOOLX2: Type = Type::Pred(2); +static SVBOOLX3: Type = Type::Pred(3); +static SVBOOLX4: Type = Type::Pred(4); +static SVCOUNT: Type = Type::Pred(1); +static SVF16: Type = Type::SVF(16, 1); +static SVF16X2: Type = Type::SVF(16, 2); +static SVF16X3: Type = Type::SVF(16, 3); +static SVF16X4: Type = Type::SVF(16, 4); +static SVF32: Type = Type::SVF(32, 1); +static SVF32X2: Type = Type::SVF(32, 2); +static SVF32X3: Type = Type::SVF(32, 3); +static SVF32X4: Type = Type::SVF(32, 4); +static SVF64: Type = Type::SVF(64, 1); +static SVF64X2: Type = Type::SVF(64, 2); +static SVF64X3: Type = Type::SVF(64, 3); +static SVF64X4: Type = Type::SVF(64, 4); +static SVI8: Type = Type::SVI(8, 1); +static SVI8X2: Type = Type::SVI(8, 2); +static SVI8X3: Type = Type::SVI(8, 3); +static SVI8X4: Type = Type::SVI(8, 4); +static SVI16: Type = Type::SVI(16, 1); +static SVI16X2: Type = Type::SVI(16, 2); +static SVI16X3: Type = Type::SVI(16, 3); +static SVI16X4: Type = Type::SVI(16, 4); +static SVI32: Type = Type::SVI(32, 1); +static SVI32X2: Type = Type::SVI(32, 2); +static SVI32X3: Type = Type::SVI(32, 3); +static SVI32X4: Type = Type::SVI(32, 4); +static SVI64: Type = Type::SVI(64, 1); +static SVI64X2: Type = Type::SVI(64, 2); +static SVI64X3: Type = Type::SVI(64, 3); +static SVI64X4: Type = Type::SVI(64, 4); +static SVU8: Type = Type::SVU(8, 1); +static SVU8X2: Type = Type::SVU(8, 2); +static SVU8X3: Type = Type::SVU(8, 3); +static SVU8X4: Type = Type::SVU(8, 4); +static SVU16: Type = Type::SVU(16, 1); +static SVU16X2: Type = Type::SVU(16, 2); +static SVU16X3: Type = Type::SVU(16, 3); +static SVU16X4: Type = Type::SVU(16, 4); +static SVU32: Type = Type::SVU(32, 1); +static SVU32X2: Type = Type::SVU(32, 2); +static SVU32X3: Type = Type::SVU(32, 3); +static SVU32X4: Type = Type::SVU(32, 4); +static SVU64: Type = Type::SVU(64, 1); +static SVU64X2: Type = Type::SVU(64, 2); +static SVU64X3: Type = Type::SVU(64, 3); +static SVU64X4: Type = Type::SVU(64, 4); +static SVPRFOP: Type = Type::Enum("svprfop"); +static SVPATTERN: Type = Type::Enum("svpattern"); + #[derive(Debug, Copy, Clone, PartialEq)] enum Type { + Void, + PrimBool, PrimFloat(u8), PrimSigned(u8), PrimUnsigned(u8), PrimPoly(u8), MutPtr(&'static Type), ConstPtr(&'static Type), + Enum(&'static str), GenericParam(&'static str), I(u8, u8, u8), U(u8, u8, u8), P(u8, u8, u8), F(u8, u8, u8), + Pred(u8), + SVI(u8, u8), + SVU(u8, u8), + SVF(u8, u8), Never, } @@ -182,19 +243,18 @@ fn verify_all_signatures() { let mut all_valid = true; for rust in FUNCTIONS { + // Most SVE intrinsics just rely on the intrinsics test tool for validation if !rust.has_test { - if !SKIP_RUNTIME_TESTS.contains(&rust.name) { - println!( - "missing run-time test named `test_{}` for `{}`", - { - let mut id = rust.name; - while id.starts_with('_') { - id = &id[1..]; - } - id - }, - rust.name - ); + if !SKIP_RUNTIME_TESTS.contains(&rust.name) + // Most run-time tests are handled by the intrinsic-test tool, except for + // load/stores (which have generated tests) + && (!rust.name.starts_with("sv") || rust.name.starts_with("svld") + || rust.name.starts_with("svst")) + // The load/store test generator can't handle these cases yet + && (!rust.name.contains("_u32base_") || rust.name.contains("index") || rust.name.contains("offset")) + && !(rust.name.starts_with("svldff1") && rust.name.contains("gather")) + { + println!("missing run-time test for `{}`", rust.name); all_valid = false; } } @@ -269,12 +329,21 @@ fn matches(rust: &Function, arm: &Intrinsic) -> Result<(), String> { let mut nconst = 0; let iter = rust.arguments.iter().zip(&arm.arguments).enumerate(); for (i, (rust_ty, (arm, arm_const))) in iter { - if *rust_ty != arm { - bail!("mismatched arguments: {rust_ty:?} != {arm:?}") + match (*rust_ty, arm) { + // SVE uses generic type parameters to handle void pointers + (Type::ConstPtr(Type::GenericParam("T")), Type::ConstPtr(Type::Void)) => (), + // SVE const generics use i32 over u64 for usability reasons + (Type::PrimSigned(32), Type::PrimUnsigned(64)) if rust.required_const.contains(&i) => { + () + } + // svset doesn't have its const argument last as we assumed when building the Function + _ if rust.name.starts_with("svset") => (), + (x, y) if x == y => (), + _ => bail!("mismatched arguments: {rust_ty:?} != {arm:?}"), } if *arm_const { nconst += 1; - if !rust.required_const.contains(&i) { + if !rust.required_const.contains(&i) && !rust.name.starts_with("svset") { bail!("argument const mismatch"); } } @@ -283,7 +352,7 @@ fn matches(rust: &Function, arm: &Intrinsic) -> Result<(), String> { bail!("wrong number of const arguments"); } - if rust.instrs.is_empty() { + if rust.instrs.is_empty() && arm.instruction != "" { bail!( "instruction not listed for `{}`, but arm lists {:?}", rust.name, @@ -322,7 +391,7 @@ fn matches(rust: &Function, arm: &Intrinsic) -> Result<(), String> { Ok(()) } -#[derive(PartialEq)] +#[derive(Debug, PartialEq)] struct Intrinsic { name: String, ret: Option, @@ -337,7 +406,7 @@ struct JsonIntrinsic { arguments: Vec, return_type: ReturnType, #[serde(default)] - instructions: Vec>, + instructions: Option>>, } #[derive(Deserialize, Debug)] @@ -356,6 +425,8 @@ fn parse_intrinsics(intrinsics: Vec) -> HashMap Intrinsic { let name = intr.name; + // Remove '[' and ']' so that intrinsics of the form `svwhilerw[_s16]` becomes `svwhilerw_s16`. + let name = name.replace('[', "").replace(']', ""); let ret = if intr.return_type.value == "void" { None } else { @@ -364,18 +435,24 @@ fn parse_intrinsic(mut intr: JsonIntrinsic) -> Intrinsic { // This ignores multiple instructions and different optional sequences for now to mimic // the old HTML scraping behaviour - let instruction = intr.instructions.swap_remove(0).swap_remove(0); + let instruction = intr + .instructions + .map_or(String::new(), |mut i| i.swap_remove(0).swap_remove(0)); let arguments = intr .arguments .iter() .map(|s| { - let (ty, konst) = match s.strip_prefix("const") { - Some(stripped) => (stripped.trim_start(), true), - None => (s.as_str(), false), + let ty = if let Some(i) = s.find('*') { + &s[..i + 1] + } else { + s.rsplit_once(' ').unwrap().0.trim_start_matches("const ") }; - let ty = ty.rsplit_once(' ').unwrap().0; - (parse_ty(ty), konst) + let ty = parse_ty(ty); + let konst = s.contains("const") && !matches!(ty, Type::ConstPtr(_)) + || s.starts_with("enum") + || s.rsplit_once(" ").unwrap().1.starts_with("imm"); + (ty, konst) }) .collect::>(); @@ -388,18 +465,27 @@ fn parse_intrinsic(mut intr: JsonIntrinsic) -> Intrinsic { } fn parse_ty(s: &str) -> Type { - let suffix = " const *"; - if let Some(base) = s.strip_suffix(suffix) { - Type::ConstPtr(parse_ty_base(base)) - } else if let Some(base) = s.strip_suffix(" *") { - Type::MutPtr(parse_ty_base(base)) + if let Some(ty) = s.strip_suffix("*") { + let ty = ty.trim(); + if let Some(ty) = ty.strip_prefix("const") { + // SVE intrinsics are west-const (`const int8_t *`) + Type::ConstPtr(parse_ty_base(ty)) + } else if let Some(ty) = ty.strip_suffix("const") { + // Neon intrinsics are east-const (`int8_t const *`) + Type::ConstPtr(parse_ty_base(ty)) + } else { + Type::MutPtr(parse_ty_base(ty)) + } } else { *parse_ty_base(s) } } fn parse_ty_base(s: &str) -> &'static Type { + let s = s.trim(); match s { + "bool" => &BOOL, + "void" => &VOID, "float16_t" => &F16, "float16x4_t" => &F16X4, "float16x4x2_t" => &F16X4X2, @@ -529,6 +615,57 @@ fn parse_ty_base(s: &str) -> &'static Type { "uint8x8x2_t" => &U8X8X2, "uint8x8x3_t" => &U8X8X3, "uint8x8x4_t" => &U8X8X4, + "svbool_t" => &SVBOOL, + "svboolx2_t" => &SVBOOLX2, + "svboolx3_t" => &SVBOOLX3, + "svboolx4_t" => &SVBOOLX4, + "svcount_t" => &SVCOUNT, + "svfloat16_t" => &SVF16, + "svfloat16x2_t" => &SVF16X2, + "svfloat16x3_t" => &SVF16X3, + "svfloat16x4_t" => &SVF16X4, + "svfloat32_t" => &SVF32, + "svfloat32x2_t" => &SVF32X2, + "svfloat32x3_t" => &SVF32X3, + "svfloat32x4_t" => &SVF32X4, + "svfloat64_t" => &SVF64, + "svfloat64x2_t" => &SVF64X2, + "svfloat64x3_t" => &SVF64X3, + "svfloat64x4_t" => &SVF64X4, + "svint8_t" => &SVI8, + "svint8x2_t" => &SVI8X2, + "svint8x3_t" => &SVI8X3, + "svint8x4_t" => &SVI8X4, + "svint16_t" => &SVI16, + "svint16x2_t" => &SVI16X2, + "svint16x3_t" => &SVI16X3, + "svint16x4_t" => &SVI16X4, + "svint32_t" => &SVI32, + "svint32x2_t" => &SVI32X2, + "svint32x3_t" => &SVI32X3, + "svint32x4_t" => &SVI32X4, + "svint64_t" => &SVI64, + "svint64x2_t" => &SVI64X2, + "svint64x3_t" => &SVI64X3, + "svint64x4_t" => &SVI64X4, + "svuint8_t" => &SVU8, + "svuint8x2_t" => &SVU8X2, + "svuint8x3_t" => &SVU8X3, + "svuint8x4_t" => &SVU8X4, + "svuint16_t" => &SVU16, + "svuint16x2_t" => &SVU16X2, + "svuint16x3_t" => &SVU16X3, + "svuint16x4_t" => &SVU16X4, + "svuint32_t" => &SVU32, + "svuint32x2_t" => &SVU32X2, + "svuint32x3_t" => &SVU32X3, + "svuint32x4_t" => &SVU32X4, + "svuint64_t" => &SVU64, + "svuint64x2_t" => &SVU64X2, + "svuint64x3_t" => &SVU64X3, + "svuint64x4_t" => &SVU64X4, + "enum svprfop" => &SVPRFOP, + "enum svpattern" => &SVPATTERN, _ => panic!("failed to parse json type {s:?}"), } From b6b2ce3d44220c77223a25c20cf99cb98732ab72 Mon Sep 17 00:00:00 2001 From: David Wood Date: Wed, 4 Mar 2026 14:16:40 +0000 Subject: [PATCH 14/20] core_arch: no SVE on arm64ec arm64ec doesn't support SVE. --- library/stdarch/crates/core_arch/src/aarch64/mod.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/library/stdarch/crates/core_arch/src/aarch64/mod.rs b/library/stdarch/crates/core_arch/src/aarch64/mod.rs index 9376e04b3b53..0292be2e0d77 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/mod.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/mod.rs @@ -25,11 +25,17 @@ #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub use self::neon::*; +// The rest of `core_arch::aarch64` is available on `arm64ec` but SVE is not supported on `arm64ec`. +#[cfg(any(target_arch = "aarch64", doc))] mod sve; +#[cfg(any(target_arch = "aarch64", doc))] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub use self::sve::*; +// The rest of `core_arch::aarch64` is available on `arm64ec` but SVE is not supported on `arm64ec`. +#[cfg(any(target_arch = "aarch64", doc))] mod sve2; +#[cfg(any(target_arch = "aarch64", doc))] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub use self::sve2::*; From c21d4e99eabe94f3270c691979d35a9e0dd9b4ae Mon Sep 17 00:00:00 2001 From: David Wood Date: Wed, 4 Mar 2026 14:16:40 +0000 Subject: [PATCH 15/20] intrinsic-test: update parsing for SVE intrinsics With SVE intrinsics in the `arm_intrinsics.json`, the parsing needs to be updated to know to expect any new fields. --- library/stdarch/crates/intrinsic-test/src/arm/json_parser.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/library/stdarch/crates/intrinsic-test/src/arm/json_parser.rs b/library/stdarch/crates/intrinsic-test/src/arm/json_parser.rs index 65c179ef0d08..c1563a7364ce 100644 --- a/library/stdarch/crates/intrinsic-test/src/arm/json_parser.rs +++ b/library/stdarch/crates/intrinsic-test/src/arm/json_parser.rs @@ -12,6 +12,8 @@ #[serde(deny_unknown_fields)] struct ReturnType { value: String, + #[serde(rename = "element_bit_size")] + _element_bit_size: Option, } #[derive(Deserialize, Debug)] @@ -50,6 +52,8 @@ struct JsonIntrinsic { args_prep: Option>, #[serde(rename = "Architectures")] architectures: Vec, + #[serde(rename = "instructions")] + _instructions: Option>>, } pub fn get_neon_intrinsics( From 88b49085833e4a0ee42b4b606cdbda48434e38ca Mon Sep 17 00:00:00 2001 From: David Wood Date: Thu, 9 Apr 2026 09:14:28 +0000 Subject: [PATCH 16/20] assert-instr: support type generics SVE intrinsics have both type and const generics and so the `assert_instr` macro needs to be able to generate test cases with the type generics instantiated with the types provided in the attribute. Co-authored-by: Jamie Cunliffe Co-authored-by: Luca Vizzarro Co-authored-by: Adam Gemmell Co-authored-by: Jacob Bramley --- .../crates/assert-instr-macro/src/lib.rs | 79 +++++++++++++++---- 1 file changed, 65 insertions(+), 14 deletions(-) diff --git a/library/stdarch/crates/assert-instr-macro/src/lib.rs b/library/stdarch/crates/assert-instr-macro/src/lib.rs index 13c3c3851b43..839aae67cb2b 100644 --- a/library/stdarch/crates/assert-instr-macro/src/lib.rs +++ b/library/stdarch/crates/assert-instr-macro/src/lib.rs @@ -14,6 +14,7 @@ use proc_macro2::TokenStream; use quote::ToTokens; +use syn::spanned::Spanned; #[proc_macro_attribute] pub fn assert_instr( @@ -67,21 +68,21 @@ pub fn assert_instr( ); let mut inputs = Vec::new(); let mut input_vals = Vec::new(); - let mut const_vals = Vec::new(); + let mut param_vals = Vec::new(); let ret = &func.sig.output; for arg in func.sig.inputs.iter() { let capture = match *arg { - syn::FnArg::Typed(ref c) => c, + syn::FnArg::Typed(ref c) => c.to_owned(), ref v => panic!( "arguments must not have patterns: `{:?}`", v.clone().into_token_stream() ), }; - let ident = match *capture.pat { - syn::Pat::Ident(ref i) => &i.ident, + let ident = match capture.pat.as_ref() { + syn::Pat::Ident(i) => &i.ident.to_owned(), _ => panic!("must have bare arguments"), }; - if let Some((_, tokens)) = invoc.args.iter().find(|a| *ident == a.0) { + if let Some(&(_, ref tokens)) = invoc.args.iter().find(|a| *ident == a.0) { input_vals.push(quote! { #tokens }); } else { inputs.push(capture); @@ -89,18 +90,48 @@ pub fn assert_instr( } } for arg in func.sig.generics.params.iter() { - let c = match *arg { - syn::GenericParam::Const(ref c) => c, + match *arg { + syn::GenericParam::Const(ref c) => { + if let Some((_, tokens)) = invoc.args.iter().find(|a| c.ident == a.0) { + param_vals.push(quote! { #tokens }); + } else { + panic!("const generics must have a value for tests"); + } + } + syn::GenericParam::Type(ref t) => { + if let Some((_, tokens)) = invoc.args.iter().find(|a| t.ident == a.0) + && let syn::Expr::Path(syn::ExprPath { qself, path, .. }) = tokens + { + param_vals.push(syn::Token![_](tokens.span()).to_token_stream()); + + let generic_ty_value = syn::TypePath { + qself: qself.clone(), + path: path.clone(), + }; + + // Replace any function arguments that use generic parameters with the + // instantiation provided in the macro invocation. + inputs.iter_mut().for_each(|arg| { + update_type_path(arg.ty.as_mut(), |type_path: &mut syn::TypePath| { + if let Some(syn::PathSegment { + ident: last_ident, .. + }) = type_path.path.segments.last_mut() + { + if *last_ident == t.ident { + *type_path = generic_ty_value.to_owned() + } + } + }) + }); + } else { + panic!("type generics must have a type for tests"); + } + } ref v => panic!( - "only const generics are allowed: `{:?}`", + "only type and const generics are allowed: `{:?}`", v.clone().into_token_stream() ), }; - if let Some((_, tokens)) = invoc.args.iter().find(|a| c.ident == a.0) { - const_vals.push(quote! { #tokens }); - } else { - panic!("const generics must have a value for tests"); - } } let attrs = func @@ -138,7 +169,7 @@ pub fn assert_instr( #[unsafe(no_mangle)] #[inline(never)] pub unsafe extern #abi fn #shim_name(#(#inputs),*) #ret { - #name::<#(#const_vals),*>(#(#input_vals),*) + #name::<#(#param_vals),*>(#(#input_vals),*) } }; @@ -222,3 +253,23 @@ fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) { } } } + +/// Calls `update` on type paths so that type generics can be replaced with the instantiation from +/// the attribute. +fn update_type_path(ty: &mut syn::Type, update: F) +where + F: Fn(&mut syn::TypePath), +{ + use syn::Type::*; + match ty { + Array(syn::TypeArray { elem, .. }) + | Group(syn::TypeGroup { elem, .. }) + | Paren(syn::TypeParen { elem, .. }) + | Ptr(syn::TypePtr { elem, .. }) + | Reference(syn::TypeReference { elem, .. }) + | Slice(syn::TypeSlice { elem, .. }) => update_type_path(elem.as_mut(), update), + Path(path @ syn::TypePath { .. }) => update(path), + Tuple(..) => panic!("tuples and generic types together are not yet supported"), + _ => {} + } +} From acb48ca2cac50ca659abaa1b041ad219215bbd7c Mon Sep 17 00:00:00 2001 From: David Wood Date: Mon, 13 Apr 2026 04:42:58 +0000 Subject: [PATCH 17/20] gen-arm: disable `assert_instr` for `pfalse` The implementation for this has the same behaviour as a `pfalse` but doesn't currently emit one until an intrinsic is added to emit a `zeroinitializer` for this. --- .../crates/core_arch/src/aarch64/sve/generated.rs | 1 - .../crates/stdarch-gen-arm/spec/sve/aarch64.spec.yml | 4 +++- library/stdarch/crates/stdarch-verify/tests/arm.rs | 10 +++++++++- 3 files changed, 12 insertions(+), 3 deletions(-) diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve/generated.rs b/library/stdarch/crates/core_arch/src/aarch64/sve/generated.rs index 6edfc8e159a7..ed28e98a813e 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/sve/generated.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/sve/generated.rs @@ -30819,7 +30819,6 @@ pub fn svorv_u64(pg: svbool_t, op: svuint64_t) -> u64 { #[inline(always)] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] -#[cfg_attr(test, assert_instr(pfalse))] pub fn svpfalse_b() -> svbool_t { svdupq_n_b8( false, false, false, false, false, false, false, false, false, false, false, false, false, diff --git a/library/stdarch/crates/stdarch-gen-arm/spec/sve/aarch64.spec.yml b/library/stdarch/crates/stdarch-gen-arm/spec/sve/aarch64.spec.yml index 1fad8bb371f9..383e50b7cc70 100644 --- a/library/stdarch/crates/stdarch-gen-arm/spec/sve/aarch64.spec.yml +++ b/library/stdarch/crates/stdarch-gen-arm/spec/sve/aarch64.spec.yml @@ -3611,7 +3611,9 @@ intrinsics: doc: Set all predicate elements to false arguments: [] return_type: "svbool_t" - assert_instr: [pfalse] + # TODO: With current implementation, `pfalse` isn't generated, will need to add intrinsic to + # generate `zeroinitializer` + # assert_instr: [pfalse] compose: - FnCall: - "svdupq_n_b8" diff --git a/library/stdarch/crates/stdarch-verify/tests/arm.rs b/library/stdarch/crates/stdarch-verify/tests/arm.rs index a37af2222a5d..2242bf4264e5 100644 --- a/library/stdarch/crates/stdarch-verify/tests/arm.rs +++ b/library/stdarch/crates/stdarch-verify/tests/arm.rs @@ -352,7 +352,10 @@ fn matches(rust: &Function, arm: &Intrinsic) -> Result<(), String> { bail!("wrong number of const arguments"); } - if rust.instrs.is_empty() && arm.instruction != "" { + if rust.instrs.is_empty() + && arm.instruction != "" + && !SKIP_ASSERT_INSTR_TESTS.contains(&rust.name) + { bail!( "instruction not listed for `{}`, but arm lists {:?}", rust.name, @@ -671,6 +674,11 @@ fn parse_ty_base(s: &str) -> &'static Type { } } +// FIXME(arm-maintainers): Some tests require new rustc intrinsics in order to generate +// the appropriate instruction, though they do have the correct behaviour - these will be fixed +// but are disabled for now. +static SKIP_ASSERT_INSTR_TESTS: &'static [&'static str] = &["svpfalse_b"]; + // FIXME(arm-maintainers): With the advent of the `intrinsic-test` tool, new tests of this kind // are no longer being added and just adding to this list indefinitely isn't the best solution for // dealing with that. From e6c0129553cf1e8605b6ece59984c7c29a4380d2 Mon Sep 17 00:00:00 2001 From: David Wood Date: Tue, 14 Apr 2026 00:03:19 +0000 Subject: [PATCH 18/20] stdarch-test: `[us]shll[tb]` have no aliases SVE's `[us]shll[tb]` intructions have no aliases unlike Neon's `[us]hll{2}` so this logic needs adjusted to not accidentally rewrite the instruction. --- .../crates/stdarch-test/src/disassembly.rs | 24 +++++++++++++------ 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/library/stdarch/crates/stdarch-test/src/disassembly.rs b/library/stdarch/crates/stdarch-test/src/disassembly.rs index 237e8d2dc28a..7cf657baa0d7 100644 --- a/library/stdarch/crates/stdarch-test/src/disassembly.rs +++ b/library/stdarch/crates/stdarch-test/src/disassembly.rs @@ -158,16 +158,26 @@ fn parse(output: &str) -> HashSet { }; if cfg!(any(target_arch = "aarch64", target_arch = "arm64ec")) { - // Normalize [us]shll.* ..., #0 instructions to the preferred form: [us]xtl.* ... - // as neither LLVM objdump nor dumpbin does that. - // See https://developer.arm.com/documentation/ddi0602/latest/SIMD-FP-Instructions/UXTL--UXTL2--Unsigned-extend-Long--an-alias-of-USHLL--USHLL2- - // and https://developer.arm.com/documentation/ddi0602/latest/SIMD-FP-Instructions/SXTL--SXTL2--Signed-extend-Long--an-alias-of-SSHLL--SSHLL2- - // for details. + // Normalize `[us]shll{2}.* ..., #0` instructions to the preferred + // form: `[us]xtl{2}.* ...` as neither LLVM objdump nor dumpbin does that. + // + // SVE has `[us]shll[tb]` instructions that don't have an equivalent alias. + // + // See Arm documentation for details: + // + // - https://developer.arm.com/documentation/ddi0602/2026-03/SIMD-FP-Instructions/UXTL--UXTL2--Unsigned-extend-long--an-alias-of-USHLL--USHLL2-?lang=en + // - https://developer.arm.com/documentation/ddi0602/2026-03/SIMD-FP-Instructions/SXTL--SXTL2--Signed-extend-long--an-alias-of-SSHLL--SSHLL2-?lang=en fn is_shll(instr: &str) -> bool { if cfg!(target_env = "msvc") { - instr.starts_with("ushll") || instr.starts_with("sshll") + instr == "ushll" + || instr == "ushll2" + || instr == "sshll" + || instr == "sshll2" } else { - instr.starts_with("ushll.") || instr.starts_with("sshll.") + instr == "ushll." + || instr == "ushll2." + || instr == "sshll." + || instr == "sshll2." } } match (parts.first(), parts.last()) { From b1818677386905dbc834f9aa1449daf2d669c718 Mon Sep 17 00:00:00 2001 From: David Wood Date: Tue, 14 Apr 2026 01:09:25 +0000 Subject: [PATCH 19/20] gen-arm: `assert_instr` on msvc for `[su]mull[tb]` `dumpbin.exe` produces `44a1c000`/`44e1c000`/`44a1c400`/`44e1c400` for `[su]mull[tb]` instead of the instruction name - so skip `assert_instr` for these intrinsics on MSVC targets. --- .../core_arch/src/aarch64/sve2/generated.rs | 64 +++++++++++++------ .../spec/sve2/aarch64.spec.yml | 22 +++++-- 2 files changed, 60 insertions(+), 26 deletions(-) diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve2/generated.rs b/library/stdarch/crates/core_arch/src/aarch64/sve2/generated.rs index 79be8a88890c..c5b0149c9c30 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/sve2/generated.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/sve2/generated.rs @@ -10281,7 +10281,10 @@ pub fn svmul_lane_u64(op1: svuint64_t, op2: svuint64_t) -> #[inline(always)] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] -#[cfg_attr(test, assert_instr(smullb, IMM_INDEX = 0))] +#[cfg_attr( + all(test, not(target_env = "msvc")), + assert_instr(smullb, IMM_INDEX = 0) +)] pub fn svmullb_lane_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { static_assert_range!(IMM_INDEX, 0..=7); unsafe extern "unadjusted" { @@ -10298,7 +10301,10 @@ pub fn svmullb_lane_s32(op1: svint16_t, op2: svint16_t) -> #[inline(always)] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] -#[cfg_attr(test, assert_instr(smullb, IMM_INDEX = 0))] +#[cfg_attr( + all(test, not(target_env = "msvc")), + assert_instr(smullb, IMM_INDEX = 0) +)] pub fn svmullb_lane_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { static_assert_range!(IMM_INDEX, 0..=3); unsafe extern "unadjusted" { @@ -10315,7 +10321,10 @@ pub fn svmullb_lane_s64(op1: svint32_t, op2: svint32_t) -> #[inline(always)] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] -#[cfg_attr(test, assert_instr(umullb, IMM_INDEX = 0))] +#[cfg_attr( + all(test, not(target_env = "msvc")), + assert_instr(umullb, IMM_INDEX = 0) +)] pub fn svmullb_lane_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { static_assert_range!(IMM_INDEX, 0..=7); unsafe extern "unadjusted" { @@ -10332,7 +10341,10 @@ pub fn svmullb_lane_u32(op1: svuint16_t, op2: svuint16_t) #[inline(always)] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] -#[cfg_attr(test, assert_instr(umullb, IMM_INDEX = 0))] +#[cfg_attr( + all(test, not(target_env = "msvc")), + assert_instr(umullb, IMM_INDEX = 0) +)] pub fn svmullb_lane_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { static_assert_range!(IMM_INDEX, 0..=3); unsafe extern "unadjusted" { @@ -10481,7 +10493,10 @@ pub fn svmullb_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { #[inline(always)] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] -#[cfg_attr(test, assert_instr(smullt, IMM_INDEX = 0))] +#[cfg_attr( + all(test, not(target_env = "msvc")), + assert_instr(smullt, IMM_INDEX = 0) +)] pub fn svmullt_lane_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { static_assert_range!(IMM_INDEX, 0..=7); unsafe extern "unadjusted" { @@ -10498,7 +10513,10 @@ pub fn svmullt_lane_s32(op1: svint16_t, op2: svint16_t) -> #[inline(always)] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] -#[cfg_attr(test, assert_instr(smullt, IMM_INDEX = 0))] +#[cfg_attr( + all(test, not(target_env = "msvc")), + assert_instr(smullt, IMM_INDEX = 0) +)] pub fn svmullt_lane_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { static_assert_range!(IMM_INDEX, 0..=3); unsafe extern "unadjusted" { @@ -10515,7 +10533,10 @@ pub fn svmullt_lane_s64(op1: svint32_t, op2: svint32_t) -> #[inline(always)] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] -#[cfg_attr(test, assert_instr(umullt, IMM_INDEX = 0))] +#[cfg_attr( + all(test, not(target_env = "msvc")), + assert_instr(umullt, IMM_INDEX = 0) +)] pub fn svmullt_lane_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { static_assert_range!(IMM_INDEX, 0..=7); unsafe extern "unadjusted" { @@ -10532,7 +10553,10 @@ pub fn svmullt_lane_u32(op1: svuint16_t, op2: svuint16_t) #[inline(always)] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] -#[cfg_attr(test, assert_instr(umullt, IMM_INDEX = 0))] +#[cfg_attr( + all(test, not(target_env = "msvc")), + assert_instr(umullt, IMM_INDEX = 0) +)] pub fn svmullt_lane_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { static_assert_range!(IMM_INDEX, 0..=3); unsafe extern "unadjusted" { @@ -10549,7 +10573,7 @@ pub fn svmullt_lane_u64(op1: svuint32_t, op2: svuint32_t) #[inline(always)] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] -#[cfg_attr(test, assert_instr(smullt))] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(smullt))] pub fn svmullt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { unsafe extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smullt.nxv8i16")] @@ -10562,7 +10586,7 @@ pub fn svmullt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { #[inline(always)] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] -#[cfg_attr(test, assert_instr(smullt))] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(smullt))] pub fn svmullt_n_s16(op1: svint8_t, op2: i8) -> svint16_t { svmullt_s16(op1, svdup_n_s8(op2)) } @@ -10571,7 +10595,7 @@ pub fn svmullt_n_s16(op1: svint8_t, op2: i8) -> svint16_t { #[inline(always)] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] -#[cfg_attr(test, assert_instr(smullt))] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(smullt))] pub fn svmullt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { unsafe extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smullt.nxv4i32")] @@ -10584,7 +10608,7 @@ pub fn svmullt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { #[inline(always)] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] -#[cfg_attr(test, assert_instr(smullt))] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(smullt))] pub fn svmullt_n_s32(op1: svint16_t, op2: i16) -> svint32_t { svmullt_s32(op1, svdup_n_s16(op2)) } @@ -10593,7 +10617,7 @@ pub fn svmullt_n_s32(op1: svint16_t, op2: i16) -> svint32_t { #[inline(always)] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] -#[cfg_attr(test, assert_instr(smullt))] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(smullt))] pub fn svmullt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { unsafe extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smullt.nxv2i64")] @@ -10606,7 +10630,7 @@ pub fn svmullt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { #[inline(always)] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] -#[cfg_attr(test, assert_instr(smullt))] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(smullt))] pub fn svmullt_n_s64(op1: svint32_t, op2: i32) -> svint64_t { svmullt_s64(op1, svdup_n_s32(op2)) } @@ -10615,7 +10639,7 @@ pub fn svmullt_n_s64(op1: svint32_t, op2: i32) -> svint64_t { #[inline(always)] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] -#[cfg_attr(test, assert_instr(umullt))] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(umullt))] pub fn svmullt_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { unsafe extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umullt.nxv8i16")] @@ -10628,7 +10652,7 @@ pub fn svmullt_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { #[inline(always)] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] -#[cfg_attr(test, assert_instr(umullt))] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(umullt))] pub fn svmullt_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { svmullt_u16(op1, svdup_n_u8(op2)) } @@ -10637,7 +10661,7 @@ pub fn svmullt_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { #[inline(always)] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] -#[cfg_attr(test, assert_instr(umullt))] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(umullt))] pub fn svmullt_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { unsafe extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umullt.nxv4i32")] @@ -10650,7 +10674,7 @@ pub fn svmullt_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { #[inline(always)] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] -#[cfg_attr(test, assert_instr(umullt))] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(umullt))] pub fn svmullt_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t { svmullt_u32(op1, svdup_n_u16(op2)) } @@ -10659,7 +10683,7 @@ pub fn svmullt_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t { #[inline(always)] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] -#[cfg_attr(test, assert_instr(umullt))] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(umullt))] pub fn svmullt_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { unsafe extern "unadjusted" { #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umullt.nxv2i64")] @@ -10672,7 +10696,7 @@ pub fn svmullt_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { #[inline(always)] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] -#[cfg_attr(test, assert_instr(umullt))] +#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(umullt))] pub fn svmullt_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { svmullt_u64(op1, svdup_n_u32(op2)) } diff --git a/library/stdarch/crates/stdarch-gen-arm/spec/sve2/aarch64.spec.yml b/library/stdarch/crates/stdarch-gen-arm/spec/sve2/aarch64.spec.yml index 6365bea21b51..269d7ff0eacb 100644 --- a/library/stdarch/crates/stdarch-gen-arm/spec/sve2/aarch64.spec.yml +++ b/library/stdarch/crates/stdarch-gen-arm/spec/sve2/aarch64.spec.yml @@ -10,6 +10,10 @@ generate_load_store_tests: true sve-unstable: &sve-unstable FnCall: [unstable, ['feature = "stdarch_aarch64_sve"', 'issue= "145052"']] +# `#[cfg_attr(all(test, not(target_env = "msvc"))]` +msvc-disabled: &msvc-disabled + FnCall: [all, [test, {FnCall: [not, ['target_env = "msvc"']]}]] + intrinsics: - name: svbext[{_n}_{type}] attr: [*sve-unstable] @@ -2429,7 +2433,10 @@ intrinsics: - LLVMLink: { name: "{type_kind[0].su}mullb.{sve_type[0]}" } - name: svmullb_lane[_{type[0]}] - attr: [*sve-unstable] + attr: + - *sve-unstable + # FIXME(arm-maintainers): MSVC disassembly of `[su]mullb` fails + - FnCall: [cfg_attr, [*msvc-disabled, {FnCall: [assert_instr, ["{type_kind[0].su}mullb", "IMM_INDEX = 0"]]}]] doc: Multiply long (bottom) arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] return_type: "{sve_type[0]}" @@ -2440,7 +2447,6 @@ intrinsics: - [u64, u32] static_defs: ["const IMM_INDEX: i32"] constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[1]}" }] - assert_instr: [["{type_kind[0].su}mullb", "IMM_INDEX = 0"]] compose: - LLVMLink: name: "{type_kind[0].su}mullb.lane.{sve_type[0]}" @@ -2449,7 +2455,10 @@ intrinsics: - FnCall: ["{llvm_link}", [$op1, $op2, $IMM_INDEX]] - name: svmullt[{_n}_{type[0]}] - attr: [*sve-unstable] + attr: + - *sve-unstable + # FIXME(arm-maintainers): MSVC disassembly of `[su]mullt` fails + - FnCall: [cfg_attr, [*msvc-disabled, {FnCall: [assert_instr, ["{type_kind[0].su}mullt"]]}]] doc: Multiply long (top) arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] return_type: "{sve_type[0]}" @@ -2460,13 +2469,15 @@ intrinsics: - [u16, u8] - [u32, u16] - [u64, u32] - assert_instr: ["{type_kind[0].su}mullt"] n_variant_op: op2 compose: - LLVMLink: { name: "{type_kind[0].su}mullt.{sve_type[0]}" } - name: svmullt_lane[_{type[0]}] - attr: [*sve-unstable] + attr: + - *sve-unstable + # FIXME(arm-maintainers): MSVC disassembly of `[su]mullt` fails + - FnCall: [cfg_attr, [*msvc-disabled, {FnCall: [assert_instr, ["{type_kind[0].su}mullt", "IMM_INDEX = 0"]]}]] doc: Multiply long (top) arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"] return_type: "{sve_type[0]}" @@ -2477,7 +2488,6 @@ intrinsics: - [u64, u32] static_defs: ["const IMM_INDEX: i32"] constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[1]}" }] - assert_instr: [["{type_kind[0].su}mullt", "IMM_INDEX = 0"]] compose: - LLVMLink: name: "{type_kind[0].su}mullt.lane.{sve_type[0]}" From b0d91aa86c5344499bf5cbec62ff650521daf880 Mon Sep 17 00:00:00 2001 From: David Wood Date: Tue, 14 Apr 2026 04:05:07 +0000 Subject: [PATCH 20/20] core_arch: disable ld/st tests on msvc There seemed to be non-deterministic failures on MSVC that looked like corruption of the FFR in CI. Until this can be investigated, to avoid any spurious failures, these tests are disabled. --- library/stdarch/crates/core_arch/src/aarch64/sve/mod.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve/mod.rs b/library/stdarch/crates/core_arch/src/aarch64/sve/mod.rs index a3f70ab61c40..04a92359a022 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/sve/mod.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/sve/mod.rs @@ -374,6 +374,9 @@ pub enum svprfop { SV_PSTL3STRM = 13, } -#[cfg(test)] +// FIXME(arm-maintainers): On MSVC targets, it seemed like spurious corruption of the FFR was being +// observed non-deterministically on CI. Disabling these tests out of caution on that platform until +// it is investigated. +#[cfg(all(test, not(target_env = "msvc")))] #[path = "ld_st_tests_aarch64.rs"] mod ld_st_tests;